diff --git a/cwf/cloud/go/plugin/handlers/handlers_test.go b/cwf/cloud/go/plugin/handlers/handlers_test.go index 3c395e654730..4efeaeeb08d8 100644 --- a/cwf/cloud/go/plugin/handlers/handlers_test.go +++ b/cwf/cloud/go/plugin/handlers/handlers_test.go @@ -296,7 +296,7 @@ func TestCwfGateways(t *testing.T) { _ = plugin.RegisterPluginForTests(t, &pluginimpl.BaseOrchestratorPlugin{}) _ = plugin.RegisterPluginForTests(t, &plugin2.CwfOrchestratorPlugin{}) clock.SetAndFreezeClock(t, time.Unix(1000000, 0)) - defer clock.GetUnfreezeClockDeferFunc(t)() + defer clock.UnfreezeClock(t) test_init.StartTestService(t) stateTestInit.StartTestService(t) deviceTestInit.StartTestService(t) diff --git a/cwf/cloud/go/protos/ue_sim.pb.go b/cwf/cloud/go/protos/ue_sim.pb.go index 253327a022a1..f8c9a2f06ee7 100644 --- a/cwf/cloud/go/protos/ue_sim.pb.go +++ b/cwf/cloud/go/protos/ue_sim.pb.go @@ -255,6 +255,9 @@ func (m *DisconnectResponse) GetRadiusPacket() []byte { type GenTrafficRequest struct { Imsi string `protobuf:"bytes,1,opt,name=imsi,proto3" json:"imsi,omitempty"` Volume *wrappers.StringValue `protobuf:"bytes,2,opt,name=volume,proto3" json:"volume,omitempty"` + Bitrate *wrappers.StringValue `protobuf:"bytes,3,opt,name=bitrate,proto3" json:"bitrate,omitempty"` + TimeInSecs uint64 `protobuf:"varint,4,opt,name=timeInSecs,proto3" json:"timeInSecs,omitempty"` + ReverseMode bool `protobuf:"varint,5,opt,name=reverseMode,proto3" json:"reverseMode,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -299,6 +302,66 @@ func (m *GenTrafficRequest) GetVolume() *wrappers.StringValue { return nil } +func (m *GenTrafficRequest) GetBitrate() *wrappers.StringValue { + if m != nil { + return m.Bitrate + } + return nil +} + +func (m *GenTrafficRequest) GetTimeInSecs() uint64 { + if m != nil { + return m.TimeInSecs + } + return 0 +} + +func (m *GenTrafficRequest) GetReverseMode() bool { + if m != nil { + return m.ReverseMode + } + 
return false +} + +type GenTrafficResponse struct { + Output []byte `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GenTrafficResponse) Reset() { *m = GenTrafficResponse{} } +func (m *GenTrafficResponse) String() string { return proto.CompactTextString(m) } +func (*GenTrafficResponse) ProtoMessage() {} +func (*GenTrafficResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_01bc05ea16f96cbc, []int{6} +} + +func (m *GenTrafficResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GenTrafficResponse.Unmarshal(m, b) +} +func (m *GenTrafficResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GenTrafficResponse.Marshal(b, m, deterministic) +} +func (m *GenTrafficResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenTrafficResponse.Merge(m, src) +} +func (m *GenTrafficResponse) XXX_Size() int { + return xxx_messageInfo_GenTrafficResponse.Size(m) +} +func (m *GenTrafficResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GenTrafficResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GenTrafficResponse proto.InternalMessageInfo + +func (m *GenTrafficResponse) GetOutput() []byte { + if m != nil { + return m.Output + } + return nil +} + func init() { proto.RegisterType((*UEConfig)(nil), "magma.cwf.UEConfig") proto.RegisterType((*AuthenticateRequest)(nil), "magma.cwf.AuthenticateRequest") @@ -306,38 +369,44 @@ func init() { proto.RegisterType((*DisconnectRequest)(nil), "magma.cwf.DisconnectRequest") proto.RegisterType((*DisconnectResponse)(nil), "magma.cwf.DisconnectResponse") proto.RegisterType((*GenTrafficRequest)(nil), "magma.cwf.GenTrafficRequest") + proto.RegisterType((*GenTrafficResponse)(nil), "magma.cwf.GenTrafficResponse") } func init() { proto.RegisterFile("cwf/protos/ue_sim.proto", fileDescriptor_01bc05ea16f96cbc) } var fileDescriptor_01bc05ea16f96cbc 
= []byte{ - // 415 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0x4f, 0x6f, 0xd3, 0x40, - 0x10, 0xc5, 0x9b, 0x36, 0x2d, 0xed, 0xe0, 0x03, 0xd9, 0x22, 0xe1, 0x98, 0x52, 0x22, 0x5f, 0x08, - 0x97, 0xb5, 0x28, 0x1c, 0x2a, 0x2e, 0xa8, 0x40, 0xc4, 0xa1, 0x07, 0xc0, 0x25, 0x3d, 0x20, 0xa1, - 0x6a, 0xbb, 0x1e, 0xbb, 0xab, 0x7a, 0x77, 0xdd, 0xfd, 0x43, 0xd4, 0xcf, 0xc2, 0x97, 0x45, 0xb1, - 0xf3, 0xc7, 0x51, 0xd3, 0x4a, 0x3d, 0x79, 0x77, 0xde, 0x9b, 0x19, 0xf9, 0xf7, 0x6c, 0x78, 0xc1, - 0x27, 0x79, 0x52, 0x19, 0xed, 0xb4, 0x4d, 0x3c, 0x5e, 0x58, 0x21, 0x69, 0x7d, 0x23, 0x7b, 0x92, - 0x15, 0x92, 0x51, 0x3e, 0xc9, 0xa3, 0xbe, 0x36, 0xfc, 0xd8, 0xcc, 0x5d, 0x5c, 0x4b, 0xa9, 0x55, - 0xe3, 0x8a, 0x0e, 0x0b, 0xad, 0x8b, 0x12, 0x1b, 0xed, 0xd2, 0xe7, 0xc9, 0xc4, 0xb0, 0xaa, 0x42, - 0x63, 0x1b, 0x3d, 0xce, 0x61, 0x77, 0x3c, 0xfa, 0xa2, 0x55, 0x2e, 0x0a, 0x42, 0xa0, 0x2b, 0xa4, - 0x15, 0x61, 0x67, 0xd0, 0x19, 0xee, 0xa5, 0xf5, 0x99, 0xf4, 0x61, 0x97, 0x79, 0x77, 0x75, 0x71, - 0x8d, 0xb7, 0xe1, 0xe6, 0xa0, 0x33, 0x0c, 0xd2, 0x27, 0xd3, 0xfb, 0x29, 0xde, 0x2e, 0x24, 0x5d, - 0xf1, 0x70, 0x6b, 0x29, 0x7d, 0xaf, 0x38, 0x79, 0x06, 0x5b, 0x16, 0x6f, 0xc2, 0xee, 0xa0, 0x33, - 0xec, 0xa6, 0xd3, 0x63, 0xfc, 0x16, 0xf6, 0x4f, 0xbc, 0xbb, 0x42, 0xe5, 0x04, 0x67, 0x0e, 0x53, - 0xbc, 0xf1, 0x68, 0xdd, 0xba, 0x95, 0xf1, 0x47, 0x78, 0xbe, 0x6a, 0xb5, 0x95, 0x56, 0x16, 0x49, - 0x0c, 0x81, 0x61, 0x99, 0xf0, 0xf6, 0x07, 0xe3, 0xd7, 0xe8, 0xea, 0x9e, 0x20, 0x5d, 0xa9, 0xc5, - 0x6f, 0xa0, 0xf7, 0x55, 0x58, 0xae, 0x95, 0x42, 0xee, 0x1e, 0x5a, 0x72, 0x0c, 0xa4, 0x6d, 0x7c, - 0xc4, 0x8a, 0x3f, 0xd0, 0xfb, 0x86, 0xea, 0x97, 0x61, 0x79, 0x2e, 0xf8, 0x03, 0x2b, 0xc8, 0x07, - 0xd8, 0xf9, 0xab, 0x4b, 0x2f, 0xb1, 0x06, 0xf7, 0xf4, 0xe8, 0x80, 0x36, 0x59, 0xd0, 0x79, 0x16, - 0xf4, 0xcc, 0x19, 0xa1, 0x8a, 0x73, 0x56, 0x7a, 0x4c, 0x67, 0xde, 0xa3, 0x7f, 0x9b, 0xb0, 0x3d, - 0x1e, 0x9d, 0x09, 0x49, 0xde, 0xc1, 0xf6, 0x49, 0x96, 0x8d, 0x47, 0x64, 0x9f, 
0x2e, 0xa2, 0xa6, - 0xf3, 0xb0, 0xa2, 0xde, 0xac, 0x58, 0x47, 0x4f, 0xcf, 0xb5, 0xc8, 0xe2, 0x0d, 0xf2, 0x13, 0x82, - 0x36, 0x3a, 0x72, 0xd8, 0xea, 0x5c, 0x83, 0x3f, 0x7a, 0x7d, 0xaf, 0xde, 0x00, 0x89, 0x37, 0xc8, - 0x29, 0xc0, 0x12, 0x14, 0x39, 0x68, 0x35, 0xdc, 0x01, 0x1d, 0xbd, 0xba, 0x47, 0x5d, 0x0c, 0xfb, - 0x04, 0xb0, 0x64, 0xb7, 0x32, 0xec, 0x0e, 0xd2, 0xb5, 0x2f, 0xf8, 0xf9, 0xe5, 0xef, 0x7e, 0x5d, - 0x4d, 0xa6, 0x7f, 0x05, 0x2f, 0xb5, 0xcf, 0x92, 0x42, 0xcf, 0x3e, 0xfc, 0xcb, 0x9d, 0xfa, 0xf9, - 0xfe, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x33, 0x09, 0xa2, 0x33, 0x03, 0x00, 0x00, + // 481 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0x8d, 0xdb, 0x24, 0x4d, 0xa7, 0x39, 0x90, 0x2d, 0x82, 0xc4, 0x94, 0x60, 0xf9, 0x42, 0x90, + 0x90, 0x2d, 0x0a, 0x42, 0x15, 0xb7, 0x02, 0x11, 0x42, 0x15, 0x02, 0x1c, 0xd2, 0x03, 0x97, 0x6a, + 0xb3, 0x1e, 0xbb, 0xab, 0xc6, 0x5e, 0x77, 0x3f, 0x1a, 0xf5, 0x17, 0xf1, 0x93, 0xf8, 0x3b, 0x28, + 0xfe, 0x68, 0x1c, 0xa5, 0x2d, 0xea, 0xc9, 0x3b, 0xf3, 0xde, 0xcc, 0x78, 0xdf, 0xbc, 0x85, 0xa7, + 0x6c, 0x11, 0xf9, 0x99, 0x14, 0x5a, 0x28, 0xdf, 0xe0, 0x99, 0xe2, 0x89, 0x97, 0x47, 0x64, 0x37, + 0xa1, 0x71, 0x42, 0x3d, 0xb6, 0x88, 0xec, 0x81, 0x90, 0xec, 0x48, 0x56, 0x2c, 0x26, 0x92, 0x44, + 0xa4, 0x05, 0xcb, 0x1e, 0xc6, 0x42, 0xc4, 0x73, 0x2c, 0xb0, 0x99, 0x89, 0xfc, 0x85, 0xa4, 0x59, + 0x86, 0x52, 0x15, 0xb8, 0x1b, 0x41, 0x67, 0x3a, 0xfe, 0x24, 0xd2, 0x88, 0xc7, 0x84, 0x40, 0x93, + 0x27, 0x8a, 0xf7, 0x2d, 0xc7, 0x1a, 0xed, 0x06, 0xf9, 0x99, 0x0c, 0xa0, 0x43, 0x8d, 0x3e, 0x3f, + 0xbb, 0xc0, 0xeb, 0xfe, 0x96, 0x63, 0x8d, 0xba, 0xc1, 0xce, 0x32, 0x3e, 0xc1, 0xeb, 0x1b, 0x48, + 0x64, 0xac, 0xbf, 0xbd, 0x82, 0xbe, 0x67, 0x8c, 0x3c, 0x82, 0x6d, 0x85, 0x97, 0xfd, 0xa6, 0x63, + 0x8d, 0x9a, 0xc1, 0xf2, 0xe8, 0xbe, 0x82, 0xfd, 0x63, 0xa3, 0xcf, 0x31, 0xd5, 0x9c, 0x51, 0x8d, + 0x01, 0x5e, 0x1a, 0x54, 0xfa, 0xb6, 0x91, 0xee, 0x07, 0x78, 0xbc, 0x4e, 
0x55, 0x99, 0x48, 0x15, + 0x12, 0x17, 0xba, 0x92, 0x86, 0xdc, 0xa8, 0x1f, 0x94, 0x5d, 0xa0, 0xce, 0x6b, 0xba, 0xc1, 0x5a, + 0xce, 0x7d, 0x09, 0xbd, 0xcf, 0x5c, 0x31, 0x91, 0xa6, 0xc8, 0xf4, 0x7d, 0x43, 0x8e, 0x80, 0xd4, + 0x89, 0x0f, 0x18, 0xf1, 0xd7, 0x82, 0xde, 0x17, 0x4c, 0x7f, 0x49, 0x1a, 0x45, 0x9c, 0xdd, 0x33, + 0x83, 0xbc, 0x83, 0xf6, 0x95, 0x98, 0x9b, 0x04, 0x73, 0xe5, 0xf6, 0x0e, 0x0f, 0xbc, 0x62, 0x19, + 0x5e, 0xb5, 0x0c, 0x6f, 0xa2, 0x25, 0x4f, 0xe3, 0x53, 0x3a, 0x37, 0x18, 0x94, 0x5c, 0xf2, 0x1e, + 0x76, 0x66, 0x5c, 0x4b, 0xaa, 0x31, 0x57, 0xf5, 0x7f, 0x65, 0x15, 0x99, 0x0c, 0x01, 0x34, 0x4f, + 0xf0, 0x6b, 0x3a, 0x41, 0xa6, 0x4a, 0xe9, 0x6b, 0x19, 0xe2, 0xc0, 0x9e, 0xc4, 0x2b, 0x94, 0x0a, + 0xbf, 0x89, 0x10, 0xfb, 0x2d, 0xc7, 0x1a, 0x75, 0x82, 0x7a, 0xca, 0x7d, 0x0d, 0xa4, 0x7e, 0xb1, + 0x52, 0x93, 0x27, 0xd0, 0x16, 0x46, 0x67, 0xa6, 0x52, 0xa3, 0x8c, 0x0e, 0xff, 0x6c, 0x41, 0x6b, + 0x3a, 0x9e, 0xf0, 0x84, 0xbc, 0x81, 0xd6, 0x71, 0x18, 0x4e, 0xc7, 0x64, 0xdf, 0xbb, 0xf1, 0xa4, + 0x57, 0xb9, 0xca, 0xee, 0x95, 0xc9, 0xdc, 0xa3, 0xde, 0xa9, 0xe0, 0xa1, 0xdb, 0x20, 0x3f, 0xa1, + 0x5b, 0xdf, 0x31, 0x19, 0xd6, 0x2a, 0x6f, 0xf1, 0x89, 0xfd, 0xe2, 0x4e, 0xbc, 0xf8, 0x4b, 0xb7, + 0x41, 0x4e, 0x00, 0x56, 0x1b, 0x25, 0x07, 0xb5, 0x82, 0x0d, 0x47, 0xd8, 0xcf, 0xef, 0x40, 0xeb, + 0xcd, 0x56, 0x52, 0xac, 0x35, 0xdb, 0x58, 0xfd, 0x5a, 0xb3, 0x4d, 0xfd, 0xdc, 0xc6, 0xc7, 0x67, + 0xbf, 0x07, 0x39, 0xc3, 0x5f, 0x3e, 0x65, 0x36, 0x17, 0x26, 0xf4, 0x63, 0x51, 0xbe, 0xd6, 0x59, + 0x3b, 0xff, 0xbe, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x29, 0xd5, 0x0d, 0x19, 0xe8, 0x03, 0x00, + 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -360,7 +429,7 @@ type UESimClient interface { Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) Disconnect(ctx context.Context, in *DisconnectRequest, opts ...grpc.CallOption) (*DisconnectResponse, error) // Triggers iperf traffic towards the CWAG - GenTraffic(ctx context.Context, in *GenTrafficRequest, opts ...grpc.CallOption) (*protos.Void, error) + GenTraffic(ctx context.Context, in *GenTrafficRequest, opts ...grpc.CallOption) (*GenTrafficResponse, error) } type uESimClient struct { @@ -398,8 +467,8 @@ func (c *uESimClient) Disconnect(ctx context.Context, in *DisconnectRequest, opt return out, nil } -func (c *uESimClient) GenTraffic(ctx context.Context, in *GenTrafficRequest, opts ...grpc.CallOption) (*protos.Void, error) { - out := new(protos.Void) +func (c *uESimClient) GenTraffic(ctx context.Context, in *GenTrafficRequest, opts ...grpc.CallOption) (*GenTrafficResponse, error) { + out := new(GenTrafficResponse) err := c.cc.Invoke(ctx, "/magma.cwf.UESim/GenTraffic", in, out, opts...) if err != nil { return nil, err @@ -417,7 +486,7 @@ type UESimServer interface { Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error) Disconnect(context.Context, *DisconnectRequest) (*DisconnectResponse, error) // Triggers iperf traffic towards the CWAG - GenTraffic(context.Context, *GenTrafficRequest) (*protos.Void, error) + GenTraffic(context.Context, *GenTrafficRequest) (*GenTrafficResponse, error) } // UnimplementedUESimServer can be embedded to have forward compatible implementations. 
@@ -433,7 +502,7 @@ func (*UnimplementedUESimServer) Authenticate(ctx context.Context, req *Authenti func (*UnimplementedUESimServer) Disconnect(ctx context.Context, req *DisconnectRequest) (*DisconnectResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Disconnect not implemented") } -func (*UnimplementedUESimServer) GenTraffic(ctx context.Context, req *GenTrafficRequest) (*protos.Void, error) { +func (*UnimplementedUESimServer) GenTraffic(ctx context.Context, req *GenTrafficRequest) (*GenTrafficResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GenTraffic not implemented") } diff --git a/cwf/gateway/configs/magmad.yml b/cwf/gateway/configs/magmad.yml index 6e0ed0f8ed5c..3c4acfce4a84 100644 --- a/cwf/gateway/configs/magmad.yml +++ b/cwf/gateway/configs/magmad.yml @@ -49,12 +49,9 @@ bootstrap_config: enable_config_streamer: True enable_upgrade_manager: True enable_network_monitor: True -enable_systemd_tailer: False enable_sync_rpc: True enable_kernel_version_checking: True -systemd_tailer_poll_interval: 30 # seconds - upgrader_factory: module: magma.magmad.upgrade.docker_upgrader class: DockerUpgraderFactory diff --git a/cwf/gateway/configs/redirectd.yml b/cwf/gateway/configs/redirectd.yml index a7a00e6a59b9..73800ae04b4a 100644 --- a/cwf/gateway/configs/redirectd.yml +++ b/cwf/gateway/configs/redirectd.yml @@ -9,6 +9,4 @@ log_level: INFO -scribe_logging_enabled: False - http_port: 8080 diff --git a/cwf/gateway/docker/.env b/cwf/gateway/docker/.env index 708e85fdfab5..263857b35187 100644 --- a/cwf/gateway/docker/.env +++ b/cwf/gateway/docker/.env @@ -13,7 +13,6 @@ IMAGE_VERSION=latest BUILD_CONTEXT=../../.. 
-CERTS_PATH=../../../.cache/test_certs/ ROOTCA_PATH=../../../.cache/test_certs/rootCA.pem CONTROL_PROXY_PATH=../configs/control_proxy.yml CONFIGS_TEMPLATES_PATH=../../../orc8r/gateway/configs/templates diff --git a/cwf/gateway/docker/docker-compose.integ-test.yml b/cwf/gateway/docker/docker-compose.integ-test.yml index a3eeff553c9c..5efd53d5324e 100644 --- a/cwf/gateway/docker/docker-compose.integ-test.yml +++ b/cwf/gateway/docker/docker-compose.integ-test.yml @@ -61,9 +61,6 @@ services: sessiond: volumes: - ../integ_tests/sessiond.yml:/etc/magma/sessiond.yml - extra_hosts: - - controller.magma.test:127.0.0.1 - - bootstrapper-controller.magma.test:127.0.0.1 swx_proxy: <<: *feggoservice @@ -106,38 +103,8 @@ services: /bin/bash -c "/usr/bin/redis-server /var/opt/magma/redis.conf --daemonize no && /usr/bin/redis-cli shutdown" - control_proxy: - extra_hosts: - - controller.magma.test:127.0.0.1 - - bootstrapper-controller.magma.test:127.0.0.1 - command: > - sh -c "mkdir -p /usr/share/ca-certificates/extra/ && - cp -f /var/opt/magma/certs/rootCA.pem /usr/share/ca-certificates/extra/ && - echo \"extra/rootCA.pem\" >> /etc/ca-certificates.conf && update-ca-certificates && - /usr/local/bin/generate_nghttpx_config.py && - /usr/bin/env nghttpx --conf /var/opt/magma/tmp/nghttpx.conf /var/opt/magma/certs/controller.key /var/opt/magma/certs/controller.crt" - - magmad: - extra_hosts: - - controller.magma.test:127.0.0.1 - - bootstrapper-controller.magma.test:127.0.0.1 - uesim: <<: *service container_name: uesim image: ${DOCKER_REGISTRY}uesim:${IMAGE_VERSION} command: envdir /var/opt/magma/envdir /var/opt/magma/bin/uesim -logtostderr=true -v=0 - - ingress: - <<: *service - container_name: ingress - image: nginx:latest - extra_hosts: - - controller.magma.test:127.0.0.1 - - bootstrapper-controller.magma.test:127.0.0.1 - - session-proxy.magma.test:127.0.0.1 - - sessiond.magma.test:127.0.0.1 - volumes: - - ../integ_tests/nginx.conf:/etc/nginx/nginx.conf:ro - - 
${CERTS_PATH}/:/etc/nginx/certs/ - command: /usr/sbin/nginx -g "daemon off;" diff --git a/cwf/gateway/integ_tests/README.md b/cwf/gateway/integ_tests/README.md index 8cca69a5b1d0..09e1b26d29a3 100644 --- a/cwf/gateway/integ_tests/README.md +++ b/cwf/gateway/integ_tests/README.md @@ -63,4 +63,4 @@ to see specific errors. → Since docker does not garbage collect previously built images, we will have to manually prune them. Run `docker system df` to see memory usage -and what can be deleted. To remove these images, run `docker system prune to docker image prune --filter until=24h`. \ No newline at end of file +and what can be deleted. To remove these images, run `docker system prune to docker image prune --filter until=24h`. diff --git a/cwf/gateway/integ_tests/auth_ul_test.go b/cwf/gateway/integ_tests/auth_ul_test.go index 0515a4c5c60d..5294eb3d9a93 100644 --- a/cwf/gateway/integ_tests/auth_ul_test.go +++ b/cwf/gateway/integ_tests/auth_ul_test.go @@ -14,9 +14,11 @@ import ( "testing" "fbc/lib/go/radius/rfc2869" + cwfprotos "magma/cwf/cloud/go/protos" "magma/feg/gateway/services/eap" "github.com/go-openapi/swag" + "github.com/golang/protobuf/ptypes/wrappers" "github.com/stretchr/testify/assert" ) @@ -39,7 +41,8 @@ func TestAuthenticateUplinkTraffic(t *testing.T) { assert.NotNil(t, eapMessage) assert.True(t, reflect.DeepEqual(int(eapMessage[0]), eap.SuccessCode)) - err = tr.GenULTraffic(ue.GetImsi(), swag.String("100K")) + req := &cwfprotos.GenTrafficRequest{Imsi: ue.GetImsi(), Volume: &wrappers.StringValue{Value: *swag.String("100K")}} + _, err = tr.GenULTraffic(req) assert.NoError(t, err) // Clear hss, ocs, and pcrf diff --git a/cwf/gateway/integ_tests/enforcement_test.go b/cwf/gateway/integ_tests/enforcement_test.go index 42395e8b37c7..e2ef9287baff 100644 --- a/cwf/gateway/integ_tests/enforcement_test.go +++ b/cwf/gateway/integ_tests/enforcement_test.go @@ -9,21 +9,27 @@ package integ_tests import ( + "encoding/json" "fmt" "reflect" "testing" "time" 
"fbc/lib/go/radius/rfc2869" + cwfprotos "magma/cwf/cloud/go/protos" "magma/feg/gateway/services/eap" "magma/lte/cloud/go/plugin/models" + lteProtos "magma/lte/cloud/go/protos" + "github.com/go-openapi/swag" + "github.com/golang/protobuf/ptypes/wrappers" "github.com/stretchr/testify/assert" ) const ( KiloBytes = 1024 + MegaBytes = 1024 * KiloBytes Buffer = 50 * KiloBytes ) @@ -59,7 +65,8 @@ func TestAuthenticateUplinkTrafficWithEnforcement(t *testing.T) { assert.True(t, reflect.DeepEqual(int(eapMessage[0]), eap.SuccessCode), fmt.Sprintf("UE Authentication did not return success")) // TODO assert CCR-I - err = tr.GenULTraffic(imsi, swag.String("500K")) + req := &cwfprotos.GenTrafficRequest{Imsi: imsi, Volume: &wrappers.StringValue{Value: *swag.String("500K")}} + _, err = tr.GenULTraffic(req) assert.NoError(t, err) // Wait for the traffic to go through @@ -77,7 +84,165 @@ func TestAuthenticateUplinkTrafficWithEnforcement(t *testing.T) { // TODO Talk to PCRF and verify appropriate CCRs propagate up _, err = tr.Disconnect(imsi) assert.NoError(t, err) + + // Clear hss, ocs, and pcrf + assert.NoError(t, ruleManager.RemoveInstalledRules()) + assert.NoError(t, tr.CleanUp()) +} + +func TestAuthenticateUplinkTrafficWithQosEnforcement(t *testing.T) { + fmt.Printf("Running TestAuthenticateUplinkTrafficWithQosEnforcement...\n") + tr := NewTestRunner() + ruleManager, err := NewRuleManager() + assert.NoError(t, err) + + ues, err := tr.ConfigUEs(1) + assert.NoError(t, err) + imsi := ues[0].GetImsi() + + err = ruleManager.AddUsageMonitor(imsi, "mqos1", 1000*MegaBytes, 250*MegaBytes) + assert.NoError(t, err) + + rule := getStaticPassAll("static-qos-1", "mqos1", 0, models.PolicyRuleTrackingTypeONLYPCRF, 3) + rule.Qos = <eProtos.FlowQos{ + MaxReqBwUl: uint32(1000000), + GbrUl: uint32(12000), + } + + err = ruleManager.AddStaticRuleToDB(rule) + assert.NoError(t, err) + err = ruleManager.AddRulesToPCRF(imsi, []string{"static-qos-1"}, nil) + assert.NoError(t, err) + + // wait for the rules 
to be synced into sessiond + time.Sleep(1 * time.Second) + + radiusP, err := tr.Authenticate(imsi) + assert.NoError(t, err) + + eapMessage := radiusP.Attributes.Get(rfc2869.EAPMessage_Type) + assert.NotNil(t, eapMessage, fmt.Sprintf("EAP Message from authentication is nil")) + assert.True(t, reflect.DeepEqual(int(eapMessage[0]), eap.SuccessCode), fmt.Sprintf("UE Authentication did not return success")) + + req := &cwfprotos.GenTrafficRequest{ + Imsi: imsi, + Bitrate: &wrappers.StringValue{Value: "5m"}, + TimeInSecs: uint64(10)} + + resp, err := tr.GenULTraffic(req) + assert.NoError(t, err) + + // Wait for the traffic to go through + time.Sleep(6 * time.Second) + + // Assert that enforcement_stats rules are properly installed and the right + // amount of data was passed through + recordsBySubID, err := tr.GetPolicyUsage() + assert.NoError(t, err) + record := recordsBySubID["IMSI"+imsi]["static-qos-1"] + assert.NotNil(t, record, fmt.Sprintf("No policy usage record for imsi: %v", imsi)) + + if resp != nil { + var perfResp map[string]interface{} + json.Unmarshal([]byte(resp.Output), &perfResp) + + intervalResp := perfResp["intervals"].([]interface{}) + assert.Equal(t, len(intervalResp), 10) + + // verify that starting bit rate was > 500k + firstIntvl := intervalResp[0].(map[string]interface{}) + firstIntvlSumMap := firstIntvl["sum"].(map[string]interface{}) + b := firstIntvlSumMap["bits_per_second"].(float64) + fmt.Println("initial bit rate transmitted by traffic gen", b) + assert.GreaterOrEqual(t, b, float64(500*1024)) + + // Ensure that the overall bitrate recd by server was <= 128k + respEndRecd := perfResp["end"].(map[string]interface{}) + respEndRcvMap := respEndRecd["sum_received"].(map[string]interface{}) + b = respEndRcvMap["bits_per_second"].(float64) + fmt.Println("bit rate observed at server ", b) + assert.LessOrEqual(t, b, float64(1000000)) + } + _, err = tr.Disconnect(imsi) + assert.NoError(t, err) time.Sleep(3 * time.Second) + + // Clear hss, ocs, and 
pcrf + assert.NoError(t, ruleManager.RemoveInstalledRules()) + assert.NoError(t, tr.CleanUp()) + fmt.Println("wait for flows to get deactivated") + time.Sleep(10 * time.Second) +} + +func testAuthenticateDownlinkTrafficWithQosEnforcement(t *testing.T) { + fmt.Printf("Running TestAuthenticateDownlinkTrafficWithQosEnforcement...\n") + tr := NewTestRunner() + ruleManager, err := NewRuleManager() + assert.NoError(t, err) + + ues, err := tr.ConfigUEs(1) + assert.NoError(t, err) + imsi := ues[0].GetImsi() + + err = ruleManager.AddUsageMonitor(imsi, "mqos2", 1000*MegaBytes, 250*MegaBytes) + assert.NoError(t, err) + + rule := getStaticPassAll("static-qos-2", "mqos2", 0, models.PolicyRuleTrackingTypeONLYPCRF, 3) + rule.Qos = <eProtos.FlowQos{ + MaxReqBwDl: uint32(1000000), + GbrDl: uint32(12000), + } + + err = ruleManager.AddStaticRuleToDB(rule) + assert.NoError(t, err) + err = ruleManager.AddRulesToPCRF(imsi, []string{"static-qos-2"}, nil) + assert.NoError(t, err) + + // wait for the rules to be synced into sessiond + time.Sleep(3 * time.Second) + + radiusP, err := tr.Authenticate(imsi) + assert.NoError(t, err) + + eapMessage := radiusP.Attributes.Get(rfc2869.EAPMessage_Type) + assert.NotNil(t, eapMessage, fmt.Sprintf("EAP Message from authentication is nil")) + assert.True(t, reflect.DeepEqual(int(eapMessage[0]), eap.SuccessCode), fmt.Sprintf("UE Authentication did not return success")) + + req := &cwfprotos.GenTrafficRequest{ + Imsi: imsi, + Bitrate: &wrappers.StringValue{Value: "5m"}, + TimeInSecs: uint64(10), + ReverseMode: true, + } + + resp, err := tr.GenULTraffic(req) + assert.NoError(t, err) + + // Wait for the traffic to go through + time.Sleep(6 * time.Second) + + // Assert that enforcement_stats rules are properly installed and the right + // amount of data was passed through + recordsBySubID, err := tr.GetPolicyUsage() + assert.NoError(t, err) + record := recordsBySubID["IMSI"+imsi]["static-qos-2"] + assert.NotNil(t, record, fmt.Sprintf("No policy usage record 
for imsi: %v", imsi)) + + if resp != nil { + var perfResp map[string]interface{} + json.Unmarshal([]byte(resp.Output), &perfResp) + + // Ensure that the overall bitrate recd by server was <= 128k + respEndRecd := perfResp["end"].(map[string]interface{}) + respEndRcvMap := respEndRecd["sum_received"].(map[string]interface{}) + b := respEndRcvMap["bits_per_second"].(float64) + fmt.Println("bit rate observed at server ", b) + assert.LessOrEqual(t, b, float64(1000000)) + } + _, err = tr.Disconnect(imsi) + assert.NoError(t, err) + time.Sleep(3 * time.Second) + // Clear hss, ocs, and pcrf assert.NoError(t, ruleManager.RemoveInstalledRules()) assert.NoError(t, tr.CleanUp()) diff --git a/cwf/gateway/integ_tests/nginx.conf b/cwf/gateway/integ_tests/nginx.conf deleted file mode 100644 index 67779b441574..000000000000 --- a/cwf/gateway/integ_tests/nginx.conf +++ /dev/null @@ -1,52 +0,0 @@ -user root; -worker_processes auto; -pid /run/nginx.pid; -include /etc/nginx/modules-enabled/*.conf; - -events { - worker_connections 768; - #multi_accept on; -} - -http { - server { - error_log /var/log/nginx/error.log info; - access_log /var/log/nginx/access.log; - - listen 9443 ssl http2; - server_name controller.magma.test; - root /var/www; - - ssl_certificate /etc/nginx/certs/controller.crt; - ssl_certificate_key /etc/nginx/certs/controller.key; - - - location /magma.lte.SessionProxyResponder/ { - grpc_pass grpc://session-proxy.magma.test:50065; - http2_push_preload on; - } - - location /magma.lte.CentralSessionController/ { - grpc_pass grpc://sessiond.magma.test:9097; - http2_push_preload on; - } - - location /magma.orc8r.Streamer/ { - grpc_set_header Content-Type "application/gprc"; - http2_push_preload on; - return 404; - } - - location /magma.orc8r.MetricsController/ { - grpc_set_header Content-Type "application/gprc"; - http2_push_preload on; - return 404; - } - - location /magma.orc8r.StateService/ { - grpc_set_header Content-Type application/gprc; - http2_push_preload on; - 
return 404; - } - } -} diff --git a/cwf/gateway/integ_tests/ocs_credit_exhausted1_test.go b/cwf/gateway/integ_tests/ocs_credit_exhausted1_test.go index 26c5ba8b3604..623aad4ffeb8 100644 --- a/cwf/gateway/integ_tests/ocs_credit_exhausted1_test.go +++ b/cwf/gateway/integ_tests/ocs_credit_exhausted1_test.go @@ -10,6 +10,7 @@ package integ_tests import ( "fmt" + cwfprotos "magma/cwf/cloud/go/protos" fegprotos "magma/feg/cloud/go/protos" "reflect" "testing" @@ -20,6 +21,7 @@ import ( "magma/lte/cloud/go/plugin/models" "github.com/go-openapi/swag" + "github.com/golang/protobuf/ptypes/wrappers" "github.com/stretchr/testify/assert" ) @@ -82,12 +84,13 @@ func TestAuthenticateOcsCreditExhaustedWithCRRU(t *testing.T) { time.Sleep(2 * time.Second) // we need to generate over 80% of the quota to trigger a CCR update - err = tr.GenULTraffic(ue.GetImsi(), swag.String("5M")) + req := &cwfprotos.GenTrafficRequest{Imsi: ue.GetImsi(), Volume: &wrappers.StringValue{Value: *swag.String("5M")}} + _, err = tr.GenULTraffic(req) assert.NoError(t, err) time.Sleep(3 * time.Second) // we need to generate over 100% of the quota to trigger a session termination - err = tr.GenULTraffic(ue.GetImsi(), swag.String("5M")) + _, err = tr.GenULTraffic(req) assert.NoError(t, err) // Wait for traffic to go through diff --git a/cwf/gateway/integ_tests/ocs_credit_exhausted2_test.go b/cwf/gateway/integ_tests/ocs_credit_exhausted2_test.go index 42af30f38708..4bf2e44d9446 100644 --- a/cwf/gateway/integ_tests/ocs_credit_exhausted2_test.go +++ b/cwf/gateway/integ_tests/ocs_credit_exhausted2_test.go @@ -10,6 +10,7 @@ package integ_tests import ( "fmt" + cwfprotos "magma/cwf/cloud/go/protos" fegprotos "magma/feg/cloud/go/protos" "reflect" "testing" @@ -20,6 +21,7 @@ import ( "magma/lte/cloud/go/plugin/models" "github.com/go-openapi/swag" + "github.com/golang/protobuf/ptypes/wrappers" "github.com/stretchr/testify/assert" ) @@ -76,7 +78,8 @@ func TestAuthenticateOcsCreditExhaustedWithoutCRRU(t *testing.T) { 
time.Sleep(2 * time.Second) // we need to generate over 100% of the quota to trigger a session termination - err = tr.GenULTraffic(ue.GetImsi(), swag.String("5M")) + req := &cwfprotos.GenTrafficRequest{Imsi: ue.GetImsi(), Volume: &wrappers.StringValue{Value: *swag.String("5M")}} + _, err = tr.GenULTraffic(req) assert.NoError(t, err) // Wait for traffic to go through diff --git a/cwf/gateway/integ_tests/omni_rules_test.go b/cwf/gateway/integ_tests/omni_rules_test.go index d18bce87e607..9316ff814058 100644 --- a/cwf/gateway/integ_tests/omni_rules_test.go +++ b/cwf/gateway/integ_tests/omni_rules_test.go @@ -15,10 +15,12 @@ import ( "time" "fbc/lib/go/radius/rfc2869" + cwfprotos "magma/cwf/cloud/go/protos" "magma/feg/gateway/services/eap" "magma/lte/cloud/go/plugin/models" "github.com/go-openapi/swag" + "github.com/golang/protobuf/ptypes/wrappers" "github.com/stretchr/testify/assert" ) @@ -55,7 +57,8 @@ func TestAuthenticateUplinkTrafficWithOmniRules(t *testing.T) { assert.NotNil(t, eapMessage) assert.True(t, reflect.DeepEqual(int(eapMessage[0]), eap.SuccessCode)) - err = tr.GenULTraffic(ue.GetImsi(), swag.String("200K")) + req := &cwfprotos.GenTrafficRequest{Imsi: ue.GetImsi(), Volume: &wrappers.StringValue{Value: *swag.String("200k")}} + _, err = tr.GenULTraffic(req) assert.NoError(t, err) // Wait for traffic to go through diff --git a/cwf/gateway/integ_tests/sessiond.yml b/cwf/gateway/integ_tests/sessiond.yml index 028ec0cf8a1e..b4f9c9c9a4cd 100644 --- a/cwf/gateway/integ_tests/sessiond.yml +++ b/cwf/gateway/integ_tests/sessiond.yml @@ -31,3 +31,11 @@ session_force_termination_timeout_ms: 5000 # Set to true to enable sessiond support of carrier wifi support_carrier_wifi: true + +# For Testing +# Set to use a locally running session proxy instance. If +# use_local_session_proxy is set to true sessiond will use 127.0.0.1: as +# the address for the session proxy service. 
+# Note: these flags are only relevant when relay is turned on +use_local_session_proxy: true +local_session_proxy_port: 9097 \ No newline at end of file diff --git a/cwf/gateway/integ_tests/test_runner.go b/cwf/gateway/integ_tests/test_runner.go index 5f2f181b9d9b..1ad6c7855afe 100644 --- a/cwf/gateway/integ_tests/test_runner.go +++ b/cwf/gateway/integ_tests/test_runner.go @@ -20,7 +20,6 @@ import ( "magma/lte/cloud/go/crypto" lteprotos "magma/lte/cloud/go/protos" - "github.com/golang/protobuf/ptypes/wrappers" "github.com/pkg/errors" ) @@ -161,16 +160,9 @@ func (testRunner *TestRunner) Disconnect(imsi string) (*radius.Packet, error) { // GenULTraffic simulates the UE sending traffic through the CWAG to the Internet // by running an iperf3 client on the UE simulator and an iperf3 server on the -// Magma traffic server. volume, if provided, specifies the volume of data -// generated and it should be in the form of "1024K", "2048M" etc -func (testRunner *TestRunner) GenULTraffic(imsi string, volume *string) error { - fmt.Printf("************************* Generating Traffic for UE with IMSI: %s\n", imsi) - req := &cwfprotos.GenTrafficRequest{ - Imsi: imsi, - } - if volume != nil { - req.Volume = &wrappers.StringValue{Value: *volume} - } +// Magma traffic server. +func (testRunner *TestRunner) GenULTraffic(req *cwfprotos.GenTrafficRequest) (*cwfprotos.GenTrafficResponse, error) { + fmt.Printf("************************* Generating Traffic for UE with Req: %v\n", req) return uesim.GenTraffic(req) } diff --git a/cwf/gateway/services/uesim/client_api.go b/cwf/gateway/services/uesim/client_api.go index ffd3cd2624ee..8f3957cb5745 100644 --- a/cwf/gateway/services/uesim/client_api.go +++ b/cwf/gateway/services/uesim/client_api.go @@ -79,11 +79,11 @@ func Disconnect(id *cwfprotos.DisconnectRequest) (*cwfprotos.DisconnectResponse, // GenTraffic triggers traffic generation for the UE with the specified IMSI. 
// Input: The IMSI of the UE to simulate traffic for -func GenTraffic(req *cwfprotos.GenTrafficRequest) error { +func GenTraffic(req *cwfprotos.GenTrafficRequest) (*cwfprotos.GenTrafficResponse, error) { cli, err := getUESimClient() if err != nil { - return err + return nil, err } - _, err = cli.GenTraffic(context.Background(), req) - return err + resp, err := cli.GenTraffic(context.Background(), req) + return resp, err } diff --git a/cwf/gateway/services/uesim/servicers/uesim.go b/cwf/gateway/services/uesim/servicers/uesim.go index 3828cd9f5594..7f431fa91109 100644 --- a/cwf/gateway/services/uesim/servicers/uesim.go +++ b/cwf/gateway/services/uesim/servicers/uesim.go @@ -11,6 +11,7 @@ package servicers import ( "fmt" "os/exec" + "strconv" "fbc/lib/go/radius" cwfprotos "magma/cwf/cloud/go/protos" @@ -155,21 +156,38 @@ func (srv *UESimServer) Disconnect(ctx context.Context, id *cwfprotos.Disconnect return &cwfprotos.DisconnectResponse{RadiusPacket: encoded}, nil } -func (srv *UESimServer) GenTraffic(ctx context.Context, req *cwfprotos.GenTrafficRequest) (*protos.Void, error) { +func (srv *UESimServer) GenTraffic(ctx context.Context, req *cwfprotos.GenTrafficRequest) (*cwfprotos.GenTrafficResponse, error) { if req == nil { - return &protos.Void{}, fmt.Errorf("Nil GenTrafficRequest provided") + return &cwfprotos.GenTrafficResponse{}, fmt.Errorf("Nil GenTrafficRequest provided") } var cmd *exec.Cmd - if req.Volume == nil { - cmd = exec.Command("iperf3", "-c", trafficSrvIP, "-M", trafficMSS) - } else { - cmd = exec.Command("iperf3", "-c", trafficSrvIP, "-M", trafficMSS, "-n", req.Volume.Value) + argList := []string{"--json", "-c", trafficSrvIP, "-M", trafficMSS} + if req.Volume != nil { + argList = append(argList, []string{"-n", req.Volume.Value}...) } + if req.ReverseMode { + argList = append(argList, "-R") + } + + if req.Bitrate != nil { + argList = append(argList, []string{"-b", req.Bitrate.Value}...) 
+ } + + if req.TimeInSecs != 0 { + argList = append(argList, []string{"-t", strconv.FormatUint(req.TimeInSecs, 10)}...) + } + + cmd = exec.Command("iperf3", argList...) cmd.Dir = "/usr/bin" - _, err := cmd.Output() - return &protos.Void{}, err + output, err := cmd.Output() + if err != nil { + glog.Info("args = ", argList) + glog.Info("error = ", err) + err = errors.Wrap(err, fmt.Sprintf("argList %v\n output %v", argList, string(output))) + } + return &cwfprotos.GenTrafficResponse{Output: output}, err } // Converts UE data to a blob for storage. diff --git a/cwf/protos/ue_sim.proto b/cwf/protos/ue_sim.proto index d3cc0117242b..08c7200364fd 100644 --- a/cwf/protos/ue_sim.proto +++ b/cwf/protos/ue_sim.proto @@ -49,6 +49,13 @@ message DisconnectResponse { message GenTrafficRequest { string imsi = 1; google.protobuf.StringValue volume = 2; + google.protobuf.StringValue bitrate = 3; + uint64 timeInSecs = 4; + bool reverseMode = 5; +} + +message GenTrafficResponse { + bytes output = 1; } // -------------------------------------------------------------------------- @@ -66,5 +73,5 @@ service UESim { rpc Disconnect (DisconnectRequest) returns (DisconnectResponse) {} // Triggers iperf traffic towards the CWAG - rpc GenTraffic(GenTrafficRequest) returns (orc8r.Void) {} + rpc GenTraffic(GenTrafficRequest) returns (GenTrafficResponse) {} } diff --git a/devmand/gateway/docker/firstparty/symphony-agent/files/configs/magmad.yml b/devmand/gateway/docker/firstparty/symphony-agent/files/configs/magmad.yml index ef6a37a85856..ff57dd6769bf 100644 --- a/devmand/gateway/docker/firstparty/symphony-agent/files/configs/magmad.yml +++ b/devmand/gateway/docker/firstparty/symphony-agent/files/configs/magmad.yml @@ -23,11 +23,8 @@ bootstrap_config: enable_config_streamer: True enable_upgrade_manager: False enable_network_monitor: False -enable_systemd_tailer: False enable_sync_rpc: False -systemd_tailer_poll_interval: 30 # seconds - mconfig_modules: - orc8r.protos.mconfig.mconfigs_pb2 - 
devmand.protos.mconfig.mconfigs_pb2 diff --git a/example/gateway/configs/magmad.yml b/example/gateway/configs/magmad.yml index 0aaf65116acf..6c188e9d4b49 100644 --- a/example/gateway/configs/magmad.yml +++ b/example/gateway/configs/magmad.yml @@ -21,11 +21,8 @@ bootstrap_config: enable_config_streamer: True enable_upgrade_manager: False enable_network_monitor: False -enable_systemd_tailer: False enable_sync_rpc: False -systemd_tailer_poll_interval: 30 # seconds - # Modules containing code generated for mconfig protobufs mconfig_modules: - orc8r.protos.mconfig.mconfigs_pb2 diff --git a/feg/cloud/go/plugin/handlers/handlers_test.go b/feg/cloud/go/plugin/handlers/handlers_test.go index f08bc8cb401f..aabfebd73438 100644 --- a/feg/cloud/go/plugin/handlers/handlers_test.go +++ b/feg/cloud/go/plugin/handlers/handlers_test.go @@ -258,7 +258,7 @@ func TestFederationGateways(t *testing.T) { _ = plugin.RegisterPluginForTests(t, &pluginimpl.BaseOrchestratorPlugin{}) _ = plugin.RegisterPluginForTests(t, &plugin2.FegOrchestratorPlugin{}) clock.SetAndFreezeClock(t, time.Unix(1000000, 0)) - defer clock.GetUnfreezeClockDeferFunc(t)() + defer clock.UnfreezeClock(t) test_init.StartTestService(t) stateTestInit.StartTestService(t) deviceTestInit.StartTestService(t) diff --git a/feg/cloud/go/services/feg_relay/servicers/feg_to_gw_relay.go b/feg/cloud/go/services/feg_relay/servicers/feg_to_gw_relay.go index 3c355618f77f..dfb071d9c94b 100644 --- a/feg/cloud/go/services/feg_relay/servicers/feg_to_gw_relay.go +++ b/feg/cloud/go/services/feg_relay/servicers/feg_to_gw_relay.go @@ -16,18 +16,15 @@ import ( "magma/feg/cloud/go/feg" "magma/feg/cloud/go/plugin/models" "magma/feg/cloud/go/services/feg_relay/utils" - "magma/orc8r/cloud/go/orc8r" "magma/orc8r/cloud/go/services/configurator" - "magma/orc8r/cloud/go/services/state" + "magma/orc8r/cloud/go/services/directoryd" + "magma/orc8r/cloud/go/services/dispatcher/gateway_registry" "magma/orc8r/lib/go/protos" "github.com/golang/glog" 
"golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/metadata" - - "magma/orc8r/cloud/go/services/directoryd" - "magma/orc8r/cloud/go/services/dispatcher/gateway_registry" ) // FegToGwRelayServer is a server serving requests from FeG to Access Gateway @@ -50,7 +47,7 @@ func getHwIDFromIMSI(ctx context.Context, imsi string) (string, error) { return "", err } for _, nid := range servedIds { - hwId, err := getHwid(nid, imsi) + hwId, err := directoryd.GetHWIDForIMSI(nid, imsi) if err == nil && len(hwId) != 0 { glog.V(2).Infof("IMSI to send is %v\n", imsi) return hwId, nil @@ -139,15 +136,3 @@ func getFegServedIds(networkId string) ([]string, error) { } return networkFegConfigs.ServedNetworkIds, nil } - -func getHwid(nid, imsi string) (string, error) { - st, err := state.GetState(nid, orc8r.DirectoryRecordType, imsi) - if err != nil { - return "", err - } - record, ok := st.ReportedState.(*directoryd.DirectoryRecord) - if !ok || len(record.LocationHistory) == 0 { - return "", err - } - return record.LocationHistory[0], nil -} diff --git a/feg/cloud/go/services/health/health/main.go b/feg/cloud/go/services/health/health/main.go index 2e1c35ad6e61..71966564ce02 100644 --- a/feg/cloud/go/services/health/health/main.go +++ b/feg/cloud/go/services/health/health/main.go @@ -18,7 +18,6 @@ import ( "magma/feg/cloud/go/services/health/reporter" "magma/feg/cloud/go/services/health/servicers" "magma/orc8r/cloud/go/blobstore" - "magma/orc8r/cloud/go/datastore" "magma/orc8r/cloud/go/service" "magma/orc8r/cloud/go/sqorc" @@ -35,7 +34,7 @@ func main() { if err != nil { log.Fatalf("Error creating service: %s", err) } - db, err := sqorc.Open(datastore.SQL_DRIVER, datastore.DATABASE_SOURCE) + db, err := sqorc.Open(blobstore.SQLDriver, blobstore.DatabaseSource) if err != nil { glog.Fatalf("Failed to connect to database: %s", err) } diff --git a/feg/gateway/configs/magmad.yml b/feg/gateway/configs/magmad.yml index b7abc7345bd6..9ee826b673e5 100644 --- 
a/feg/gateway/configs/magmad.yml +++ b/feg/gateway/configs/magmad.yml @@ -38,11 +38,8 @@ bootstrap_config: enable_config_streamer: True enable_upgrade_manager: True enable_network_monitor: False -enable_systemd_tailer: False enable_sync_rpc: True -systemd_tailer_poll_interval: 30 # seconds - upgrader_factory: module: magma.magmad.upgrade.docker_upgrader class: DockerUpgraderFactory diff --git a/feg/gateway/go.mod b/feg/gateway/go.mod index d8575d5d4441..bfc91c838bc9 100644 --- a/feg/gateway/go.mod +++ b/feg/gateway/go.mod @@ -24,9 +24,7 @@ require ( github.com/go-redis/redis v6.14.1+incompatible github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/protobuf v1.3.3 - github.com/gorilla/mux v1.6.2 github.com/ishidawataru/sctp v0.0.0-20190922091402-408ec287e38c - github.com/pkg/errors v0.8.1 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.2.0 diff --git a/feg/gateway/go.sum b/feg/gateway/go.sum index 379c24c046aa..add470145ea3 100644 --- a/feg/gateway/go.sum +++ b/feg/gateway/go.sum @@ -70,7 +70,6 @@ github.com/emakeev/go-diameter/v4 v4.0.4 h1:QZ8734e0Pyrp9NQtFH87HxOYcAUKuLx+cyAn github.com/emakeev/go-diameter/v4 v4.0.4/go.mod h1:Qx/+pf+c9sBUHWq1d7EH3bkdwN8U0mUpdy9BieDw6UQ= github.com/emakeev/snowflake v0.0.0-20200206205012-767080b052fe h1:AtQCu1EME1N7Gwb4BNcpeKNxPmMK6G2QCeN+rVJO1EM= github.com/emakeev/snowflake v0.0.0-20200206205012-767080b052fe/go.mod h1:kEQPRPTu2Cm00AIGJ0SWq8pOfa9kPZozrZ5/2XN2PB4= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= @@ 
-102,8 +101,6 @@ github.com/go-openapi/analysis v0.18.0 h1:hRMEymXOgwo7KLPqqFmw6t3jLO2/zxUe/TXjAH github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.17.2/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0 h1:+RnmJ5MQccF7jwWAoMzwOpzJEspZ18ZIWfg9Z2eiXq8= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/inflect v0.18.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= @@ -128,8 +125,6 @@ github.com/go-openapi/spec v0.18.0 h1:aIjeyG5mo5/FrvDkpKKEGZPmF9MPHahS72mzfVqeQX github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0 h1:FqqmmVCKn3di+ilU/+1m957T1CnMz3IteVUcV3aGXWA= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.4 h1:eRvaqAhpL0IL6Trh5fDsGnGhiXndzHFuA05w6sXH6/g= github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= @@ -381,6 +376,7 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/thoas/go-funk v0.4.0 h1:KBaa5NL7NMtsFlQaD8nQMbDt1wuM+OOaNQyYNYQFhVo= github.com/thoas/go-funk v0.4.0/go.mod 
h1:mlR+dHGb+4YgXkf13rkQTuzrneeHANxOm6+ZnEV9HsA= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/toqueteos/webbrowser v1.1.0/go.mod h1:Hqqqmzj8AHn+VlZyVjaRWY20i25hoOZGAABCcg2el4A= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= diff --git a/feg/gateway/services/session_proxy/servicers/policy.go b/feg/gateway/services/session_proxy/servicers/policy.go index b0dde323f400..1f0c3770ac5d 100644 --- a/feg/gateway/services/session_proxy/servicers/policy.go +++ b/feg/gateway/services/session_proxy/servicers/policy.go @@ -58,7 +58,7 @@ func (srv *CentralSessionController) sendTerminationGxRequest(pRequest *protos.S request := &gx.CreditControlRequest{ SessionID: pRequest.SessionId, Type: credit_control.CRTTerminate, - IMSI: credit_control.AddIMSIPrefix(pRequest.Sid), + IMSI: credit_control.RemoveIMSIPrefix(pRequest.Sid), RequestNumber: pRequest.RequestNumber, IPAddr: pRequest.UeIpv4, UsageReports: reports, diff --git a/feg/gateway/services/session_proxy/servicers/session_controller_test.go b/feg/gateway/services/session_proxy/servicers/session_controller_test.go index 6f50368d2abe..8ea4b9d23409 100644 --- a/feg/gateway/services/session_proxy/servicers/session_controller_test.go +++ b/feg/gateway/services/session_proxy/servicers/session_controller_test.go @@ -32,8 +32,10 @@ import ( ) const ( - IMSI1 = "IMSI00101" - IMSI2 = "IMSI00102" + IMSI1 = "IMSI00101" + IMSI2 = "IMSI00102" + IMSI1_NOPREFIX = "00101" + IMSI2_NOPREFIX = "00102" ) type MockPolicyClient struct { @@ -340,7 +342,7 @@ func standardUsageTest( "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGyCCRMatcher(credit_control.CRTInit)), + mock.MatchedBy(getGyCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTInit)), ).Return(nil).Run(returnDefaultGyResponse).Once() multiReqType = 
credit_control.CRTUpdate // on per session init, credits are received through CCR-Updates } @@ -349,7 +351,7 @@ func standardUsageTest( "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGyCCRMatcher(multiReqType)), + mock.MatchedBy(getGyCCRMatcher(IMSI1_NOPREFIX, multiReqType)), ).Return(nil).Run(returnDefaultGyResponse).Once() createResponse, err := srv.CreateSession(ctx, &protos.CreateSessionRequest{ Subscriber: &protos.SubscriberID{ @@ -573,7 +575,7 @@ func TestSessionTermination(t *testing.T) { "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGxCCRMatcher(credit_control.CRTTerminate)), + mock.MatchedBy(getGxCCRMatcher(IMSI2_NOPREFIX, credit_control.CRTTerminate)), ).Return(nil).Run(func(args mock.Arguments) { done := args.Get(1).(chan interface{}) request := args.Get(2).(*gx.CreditControlRequest) @@ -588,7 +590,7 @@ func TestSessionTermination(t *testing.T) { "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGyCCRMatcher(credit_control.CRTTerminate)), + mock.MatchedBy(getGyCCRMatcher(IMSI2_NOPREFIX, credit_control.CRTTerminate)), ).Return(nil).Run(func(args mock.Arguments) { done := args.Get(1).(chan interface{}) request := args.Get(2).(*gy.CreditControlRequest) @@ -633,13 +635,13 @@ func TestGxUsageMonitoring(t *testing.T) { "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGyCCRMatcher(credit_control.CRTUpdate)), + mock.MatchedBy(getGyCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTUpdate)), ).Return(nil).Run(returnDefaultGyResponse).Times(2) mocks.gx.On( "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGxCCRMatcher(credit_control.CRTUpdate)), + mock.MatchedBy(getGxCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTUpdate)), ).Return(nil).Run(returnDefaultGxUpdateResponse).Times(2) updateResponse, _ := srv.UpdateSession(ctx, &protos.UpdateSessionRequest{ @@ -680,7 +682,7 @@ func TestGxUsageMonitoring(t *testing.T) { 
"SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGxCCRMatcher(credit_control.CRTUpdate)), + mock.MatchedBy(getGxCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTUpdate)), ).Return(nil).Run(returnEmptyGxUpdateResponse).Times(1) emptyUpdateResponse, _ := srv.UpdateSession(ctx, &protos.UpdateSessionRequest{ @@ -702,7 +704,7 @@ func TestGxUsageMonitoring(t *testing.T) { "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGxCCRMatcher(credit_control.CRTUpdate)), + mock.MatchedBy(getGxCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTUpdate)), ).Return(nil).Run(getRuleInstallGxUpdateResponse([]string{"static1", "static2"}, []string{})).Times(1) ruleInstallUpdateResponse, _ := srv.UpdateSession(ctx, &protos.UpdateSessionRequest{ @@ -724,7 +726,7 @@ func TestGxUsageMonitoring(t *testing.T) { "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGxCCRMatcher(credit_control.CRTUpdate)), + mock.MatchedBy(getGxCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTUpdate)), ).Return(nil).Run(getRuleInstallGxUpdateResponse([]string{}, []string{"base_10"})).Times(1) mocks.policydb.On("GetRuleIDsForBaseNames", []string{"base_10"}).Return([]string{"base_rule_1", "base_rule_2"}) @@ -747,7 +749,7 @@ func TestGxUsageMonitoring(t *testing.T) { "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGxCCRMatcher(credit_control.CRTUpdate)), + mock.MatchedBy(getGxCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTUpdate)), ).Return(nil).Run(returnDynamicRuleInstallGxUpdateResponse).Times(1) ruleInstallUpdateResponse, _ = srv.UpdateSession(ctx, &protos.UpdateSessionRequest{ @@ -768,7 +770,7 @@ func TestGxUsageMonitoring(t *testing.T) { "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGxCCRMatcher(credit_control.CRTUpdate)), + mock.MatchedBy(getGxCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTUpdate)), ).Return(nil).Run(getRuleDisableGxUpdateResponse([]string{"rule1", "rule2"}, 
[]string{})).Times(1) ruleDisableUpdateResponse, _ := srv.UpdateSession(ctx, &protos.UpdateSessionRequest{ @@ -789,7 +791,7 @@ func TestGxUsageMonitoring(t *testing.T) { "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGxCCRMatcher(credit_control.CRTUpdate)), + mock.MatchedBy(getGxCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTUpdate)), ).Return(nil).Run(getRuleDisableGxUpdateResponse([]string{}, []string{"base_10"})).Times(1) mocks.policydb.On("GetRuleIDsForBaseNames", []string{"base_10"}).Return([]string{"base_rule_1", "base_rule_2"}) @@ -829,13 +831,13 @@ func TestGetHealthStatus(t *testing.T) { "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGyCCRMatcher(credit_control.CRTUpdate)), + mock.MatchedBy(getGyCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTUpdate)), ).Return(nil).Run(returnDefaultGyResponse).Times(2) mocks.gx.On( "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGxCCRMatcher(credit_control.CRTUpdate)), + mock.MatchedBy(getGxCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTUpdate)), ).Return(nil).Run(returnDefaultGxUpdateResponse).Times(2) _, _ = srv.UpdateSession(ctx, &protos.UpdateSessionRequest{ @@ -860,13 +862,13 @@ func TestGetHealthStatus(t *testing.T) { "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGyCCRMatcher(credit_control.CRTUpdate)), + mock.MatchedBy(getGyCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTUpdate)), ).Return(fmt.Errorf("Failed to establish new diameter connection; will retry upon first request.")).Run(returnDefaultGyResponse).Times(2) mocks.gx.On( "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGxCCRMatcher(credit_control.CRTUpdate)), + mock.MatchedBy(getGxCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTUpdate)), ).Return(fmt.Errorf("Failed to establish new diameter connection; will retry upon first request.")).Run(returnDefaultGxUpdateResponse).Times(2) _, _ = srv.UpdateSession(ctx, 
&protos.UpdateSessionRequest{ @@ -1143,15 +1145,15 @@ func getRuleDisableGxUpdateResponse(ruleNames []string, ruleBaseNames []string) } } -func getGyCCRMatcher(ccrType credit_control.CreditRequestType) interface{} { +func getGyCCRMatcher(imsi string, ccrType credit_control.CreditRequestType) interface{} { return func(request *gy.CreditControlRequest) bool { - return request.Type == ccrType + return request.Type == ccrType && request.IMSI == imsi } } -func getGxCCRMatcher(ccrType credit_control.CreditRequestType) interface{} { +func getGxCCRMatcher(imsi string, ccrType credit_control.CreditRequestType) interface{} { return func(request *gx.CreditControlRequest) bool { - return request.Type == ccrType + return request.Type == ccrType && request.IMSI == imsi } } @@ -1193,7 +1195,7 @@ func TestSessionControllerUseGyForAuthOnlySuccess(t *testing.T) { "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGyCCRMatcher(credit_control.CRTInit)), + mock.MatchedBy(getGyCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTInit)), ).Return(nil).Run(returnGySuccessNoRatingGroup).Once() cfg := getTestConfig(gy.PerKeyInit) @@ -1258,7 +1260,7 @@ func TestSessionControllerUseGyForAuthOnlyNoRatingGroup(t *testing.T) { "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGyCCRMatcher(credit_control.CRTInit)), + mock.MatchedBy(getGyCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTInit)), ).Return(nil).Run(returnGySuccessNoRatingGroup).Once() cfg := getTestConfig(gy.PerKeyInit) @@ -1329,7 +1331,7 @@ func TestSessionControllerUseGyForAuthOnlyCreditLimitReached(t *testing.T) { "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGyCCRMatcher(credit_control.CRTInit)), + mock.MatchedBy(getGyCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTInit)), ).Return(nil).Run(returnGySuccessCreditLimitReached).Once() cfg := getTestConfig(gy.PerKeyInit) @@ -1405,7 +1407,7 @@ func TestSessionControllerUseGyForAuthOnlySubscriberBarred(t *testing.T) 
{ "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGyCCRMatcher(credit_control.CRTInit)), + mock.MatchedBy(getGyCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTInit)), ).Return(nil).Run(returnGySuccessSubscriberBarred).Once() cfg := getTestConfig(gy.PerKeyInit) @@ -1485,7 +1487,7 @@ func revalidationTimerTest( "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGxCCRMatcher(credit_control.CRTInit)), + mock.MatchedBy(getGxCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTInit)), ).Return(nil).Run(returnGxSuccessRevalidationTimer).Once() mocks.policydb.On("GetOmnipresentRules").Return([]string{"omnipresent_rule_1"}, []string{"omnipresent_base_1"}) @@ -1497,7 +1499,7 @@ func revalidationTimerTest( "SendCreditControlRequest", mock.Anything, mock.Anything, - mock.MatchedBy(getGyCCRMatcher(credit_control.CRTInit)), + mock.MatchedBy(getGyCCRMatcher(IMSI1_NOPREFIX, credit_control.CRTInit)), ).Return(nil).Run(returnGySuccessNoRatingGroup).Once() } diff --git a/feg/gateway/services/testcore/mock_driver/mock_driver.go b/feg/gateway/services/testcore/mock_driver/mock_driver.go index dab7ce32a9ce..e58a26613871 100644 --- a/feg/gateway/services/testcore/mock_driver/mock_driver.go +++ b/feg/gateway/services/testcore/mock_driver/mock_driver.go @@ -9,13 +9,11 @@ package mock_driver import ( - "fmt" - "magma/feg/cloud/go/protos" ) type Expectation interface { - DoesMatch(interface{}) bool + DoesMatch(interface{}) error GetAnswer() interface{} } @@ -60,10 +58,10 @@ func (e *MockDriver) GetAnswerFromExpectations(message interface{}) interface{} return e.getAnswerForUnexpectedMessage() } expectation := e.expectations[e.expectationIndex] - doesMatch := expectation.DoesMatch(message) - if !doesMatch { - err := &protos.ErrorByIndex{Index: int32(e.expectationIndex), Error: fmt.Sprintf("Expected: %v, Received: %v", expectation, message)} - e.errorMessages = append(e.errorMessages, err) + err := expectation.DoesMatch(message) + if err != nil { + 
errByIndex := &protos.ErrorByIndex{Index: int32(e.expectationIndex), Error: err.Error()} + e.errorMessages = append(e.errorMessages, errByIndex) return e.getAnswerForUnexpectedMessage() } diff --git a/feg/gateway/services/testcore/mock_driver/mock_driver_test.go b/feg/gateway/services/testcore/mock_driver/mock_driver_test.go index f855bbc00709..d7f2ae24c423 100644 --- a/feg/gateway/services/testcore/mock_driver/mock_driver_test.go +++ b/feg/gateway/services/testcore/mock_driver/mock_driver_test.go @@ -9,6 +9,7 @@ package mock_driver_test import ( + "fmt" "testing" "magma/feg/cloud/go/protos" @@ -22,12 +23,15 @@ type TestExpectation struct { answer string } -func (t TestExpectation) DoesMatch(iReq interface{}) bool { +func (t TestExpectation) DoesMatch(iReq interface{}) error { request, ok := iReq.(string) if !ok { - return false + return fmt.Errorf("request is not of type string") } - return request == t.request + if request != t.request { + return fmt.Errorf("Expected: %v, Received: %v", t.request, request) + } + return nil } func (t TestExpectation) GetAnswer() interface{} { @@ -103,8 +107,8 @@ func TestSomeExpectationsNotMet(t *testing.T) { } assert.ElementsMatch(t, expectedResult, result) expectedErrors := []*protos.ErrorByIndex{ - {Index: 1, Error: "Expected: {req2 ans2}, Received: bad-req2-1"}, - {Index: 1, Error: "Expected: {req2 ans2}, Received: bad-req2-2"}, + {Index: 1, Error: "Expected: req2, Received: bad-req2-1"}, + {Index: 1, Error: "Expected: req2, Received: bad-req2-2"}, } assert.ElementsMatch(t, expectedErrors, errs) } diff --git a/feg/gateway/services/testcore/pcrf/mock_pcrf/conversions.go b/feg/gateway/services/testcore/pcrf/mock_pcrf/conversions.go index 7e2dc37a980a..494bb46bef36 100644 --- a/feg/gateway/services/testcore/pcrf/mock_pcrf/conversions.go +++ b/feg/gateway/services/testcore/pcrf/mock_pcrf/conversions.go @@ -18,18 +18,18 @@ import ( "github.com/fiorix/go-diameter/v4/diam/datatype" ) -func toStaticRuleNameInstallAVP(ruleName string) 
*diam.AVP { - return diam.NewAVP(avp.ChargingRuleInstall, avp.Mbit|avp.Vbit, diameter.Vendor3GPP, &diam.GroupedAVP{ +func toStaticRuleNameAVP(ruleName string, action uint32) *diam.AVP { + return diam.NewAVP(action, avp.Mbit|avp.Vbit, diameter.Vendor3GPP, &diam.GroupedAVP{ AVP: []*diam.AVP{ diam.NewAVP(avp.ChargingRuleName, avp.Mbit|avp.Vbit, diameter.Vendor3GPP, datatype.OctetString(ruleName)), }, }) } -func toStaticBaseNameInstallAVP(baseName string) *diam.AVP { - return diam.NewAVP(avp.ChargingRuleInstall, avp.Mbit|avp.Vbit, diameter.Vendor3GPP, &diam.GroupedAVP{ +func toStaticBaseNameAVP(ruleName string, action uint32) *diam.AVP { + return diam.NewAVP(action, avp.Mbit|avp.Vbit, diameter.Vendor3GPP, &diam.GroupedAVP{ AVP: []*diam.AVP{ - diam.NewAVP(avp.ChargingRuleBaseName, avp.Mbit|avp.Vbit, diameter.Vendor3GPP, datatype.UTF8String(baseName)), + diam.NewAVP(avp.ChargingRuleBaseName, avp.Mbit|avp.Vbit, diameter.Vendor3GPP, datatype.OctetString(ruleName)), }, }) } @@ -122,11 +122,11 @@ func toRuleInstallAVPs( ) []*diam.AVP { avps := make([]*diam.AVP, 0, len(ruleNames)+len(ruleBaseNames)+len(ruleDefs)) for _, ruleName := range ruleNames { - avps = append(avps, toStaticRuleNameInstallAVP(ruleName)) + avps = append(avps, toStaticRuleNameAVP(ruleName, avp.ChargingRuleInstall)) } for _, baseName := range ruleBaseNames { - avps = append(avps, toStaticBaseNameInstallAVP(baseName)) + avps = append(avps, toStaticBaseNameAVP(baseName, avp.ChargingRuleInstall)) } for _, rule := range ruleDefs { @@ -143,3 +143,23 @@ func toUsageMonitorAVPs(monitors map[string]*protos.UsageMonitor) []*diam.AVP { } return avps } + +func toRuleRemovalAVPs(ruleNames, ruleBaseNames []string) []*diam.AVP { + avps := make([]*diam.AVP, 0, len(ruleNames)+len(ruleBaseNames)) + for _, ruleName := range ruleNames { + avps = append(avps, toStaticRuleNameAVP(ruleName, avp.ChargingRuleRemove)) + } + + for _, baseName := range ruleBaseNames { + avps = append(avps, toStaticBaseNameAVP(baseName, 
avp.ChargingRuleRemove)) + } + return avps +} + +func toUsageMonitorByMkey(monitors []*usageMonitorRequestAVP) map[string]*usageMonitorRequestAVP { + monitorByKey := map[string]*usageMonitorRequestAVP{} + for _, monitor := range monitors { + monitorByKey[monitor.MonitoringKey] = monitor + } + return monitorByKey +} diff --git a/lte/cloud/go/lte/const.go b/lte/cloud/go/lte/const.go index 412c2b0801c2..1fb845f247ab 100644 --- a/lte/cloud/go/lte/const.go +++ b/lte/cloud/go/lte/const.go @@ -19,8 +19,8 @@ const ( NetworkSubscriberConfigType = "network_subscriber_config" EnodebStateType = "single_enodeb" - SubscriberStateType = "subscriber" SubscriberEntityType = "subscriber" + ICMPStateType = "icmp_monitoring" BaseNameEntityType = "base_name" PolicyRuleEntityType = "policy" diff --git a/lte/cloud/go/plugin/handlers/handlers.go b/lte/cloud/go/plugin/handlers/handlers.go index 925c3cb1000f..d39fb1402bbc 100644 --- a/lte/cloud/go/plugin/handlers/handlers.go +++ b/lte/cloud/go/plugin/handlers/handlers.go @@ -448,6 +448,8 @@ func addConnectedEnodeb(c echo.Context) error { return c.NoContent(http.StatusNoContent) } +var subscriberStateTypes = []string{lte.ICMPStateType} + func listSubscribers(c echo.Context) error { networkID, nerr := obsidian.GetNetworkId(c) if nerr != nil { @@ -459,9 +461,24 @@ func listSubscribers(c echo.Context) error { return obsidian.HttpError(err, http.StatusInternalServerError) } + allIMSIs := funk.Map(ents, func(e configurator.NetworkEntity) string { return e.Key }).([]string) + subStates, err := state.SearchStates(networkID, subscriberStateTypes, allIMSIs) + if err != nil { + return obsidian.HttpError(err, http.StatusInternalServerError) + } + statesByTypeBySid := map[string]map[string]state.State{} + for stateID, st := range subStates { + byType, ok := statesByTypeBySid[stateID.DeviceID] + if !ok { + byType = map[string]state.State{} + } + byType[stateID.Type] = st + statesByTypeBySid[stateID.DeviceID] = byType + } + ret := 
make(map[string]*ltemodels.Subscriber, len(ents)) for _, ent := range ents { - ret[ent.Key] = (<emodels.Subscriber{}).FromBackendModels(ent) + ret[ent.Key] = (<emodels.Subscriber{}).FromBackendModels(ent, statesByTypeBySid[ent.Key]) } return c.JSON(http.StatusOK, ret) } @@ -511,7 +528,16 @@ func getSubscriber(c echo.Context) error { return obsidian.HttpError(err, http.StatusInternalServerError) } - ret := (<emodels.Subscriber{}).FromBackendModels(ent) + states, err := state.SearchStates(networkID, subscriberStateTypes, []string{subscriberID}) + if err != nil { + return obsidian.HttpError(err, http.StatusInternalServerError) + } + statesByType := map[string]state.State{} + for stateID, st := range states { + statesByType[stateID.Type] = st + } + + ret := (<emodels.Subscriber{}).FromBackendModels(ent, statesByType) return c.JSON(http.StatusOK, ret) } diff --git a/lte/cloud/go/plugin/handlers/handlers_test.go b/lte/cloud/go/plugin/handlers/handlers_test.go index db9c71af5089..ec08339bb5b0 100644 --- a/lte/cloud/go/plugin/handlers/handlers_test.go +++ b/lte/cloud/go/plugin/handlers/handlers_test.go @@ -1211,7 +1211,7 @@ func TestListAndGetGateways(t *testing.T) { _ = plugin.RegisterPluginForTests(t, &pluginimpl.BaseOrchestratorPlugin{}) _ = plugin.RegisterPluginForTests(t, <ePlugin.LteOrchestratorPlugin{}) clock.SetAndFreezeClock(t, time.Unix(1000000, 0)) - defer clock.GetUnfreezeClockDeferFunc(t)() + defer clock.UnfreezeClock(t) test_init.StartTestService(t) stateTestInit.StartTestService(t) @@ -1407,7 +1407,7 @@ func TestUpdateGateway(t *testing.T) { _ = plugin.RegisterPluginForTests(t, &pluginimpl.BaseOrchestratorPlugin{}) _ = plugin.RegisterPluginForTests(t, <ePlugin.LteOrchestratorPlugin{}) clock.SetAndFreezeClock(t, time.Unix(1000000, 0)) - defer clock.GetUnfreezeClockDeferFunc(t)() + defer clock.UnfreezeClock(t) test_init.StartTestService(t) deviceTestInit.StartTestService(t) @@ -1551,7 +1551,7 @@ func TestDeleteGateway(t *testing.T) { _ = 
plugin.RegisterPluginForTests(t, &pluginimpl.BaseOrchestratorPlugin{}) _ = plugin.RegisterPluginForTests(t, <ePlugin.LteOrchestratorPlugin{}) clock.SetAndFreezeClock(t, time.Unix(1000000, 0)) - defer clock.GetUnfreezeClockDeferFunc(t)() + defer clock.UnfreezeClock(t) test_init.StartTestService(t) deviceTestInit.StartTestService(t) @@ -2498,7 +2498,7 @@ func TestGetEnodebState(t *testing.T) { // report state clock.SetAndFreezeClock(t, time.Unix(1000000, 0)) - defer clock.GetUnfreezeClockDeferFunc(t)() + defer clock.UnfreezeClock(t) // encode the appropriate certificate into context ctx := test_utils.GetContextWithCertificate(t, "hwid1") @@ -2674,6 +2674,7 @@ func TestListSubscribers(t *testing.T) { test_init.StartTestService(t) deviceTestInit.StartTestService(t) + stateTestInit.StartTestService(t) err := configurator.CreateNetwork(configurator.Network{ID: "n1"}) assert.NoError(t, err) @@ -2765,6 +2766,60 @@ func TestListSubscribers(t *testing.T) { }), } tests.RunUnitTest(t, e, tc) + + // Now create ICMP state for 1234567890 + // First we need to register a gateway which can report state + _, err = configurator.CreateEntity( + "n1", + configurator.NetworkEntity{Type: orc8r.MagmadGatewayType, Key: "g1", Config: &models.MagmadGatewayConfigs{}, PhysicalID: "hw1"}, + ) + assert.NoError(t, err) + frozenClock := int64(1000000) + clock.SetAndFreezeClock(t, time.Unix(frozenClock, 0)) + defer clock.UnfreezeClock(t) + icmpStatus := <eModels.IcmpStatus{LatencyMs: f32Ptr(12.34)} + ctx := test_utils.GetContextWithCertificate(t, "hw1") + test_utils.ReportState(t, ctx, lte.ICMPStateType, "IMSI1234567890", icmpStatus) + + tc = tests.Test{ + Method: "GET", + URL: testURLRoot, + Handler: listSubscribers, + ParamNames: []string{"network_id"}, + ParamValues: []string{"n1"}, + ExpectedStatus: 200, + ExpectedResult: tests.JSONMarshaler(map[string]*lteModels.Subscriber{ + "IMSI1234567890": { + ID: "IMSI1234567890", + Lte: <eModels.LteSubscription{ + AuthAlgo: "MILENAGE", + AuthKey: 
[]byte("\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11"), + AuthOpc: []byte("\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11"), + State: "ACTIVE", + SubProfile: "default", + }, + ActiveApns: lteModels.ApnList{apn2, apn1}, + Monitoring: &lteModels.SubscriberStatus{ + Icmp: &lteModels.IcmpStatus{ + LastReportedTime: frozenClock, + LatencyMs: f32Ptr(12.34), + }, + }, + }, + "IMSI0987654321": { + ID: "IMSI0987654321", + Lte: &lteModels.LteSubscription{ + AuthAlgo: "MILENAGE", + AuthKey: []byte("\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22"), + AuthOpc: []byte("\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22\x22"), + State: "ACTIVE", + SubProfile: "foo", + }, + ActiveApns: lteModels.ApnList{apn1}, + }, + }), + } + tests.RunUnitTest(t, e, tc) } func TestGetSubscriber(t *testing.T) { @@ -2773,6 +2828,7 @@ func TestGetSubscriber(t *testing.T) { test_init.StartTestService(t) deviceTestInit.StartTestService(t) + stateTestInit.StartTestService(t) err := configurator.CreateNetwork(configurator.Network{ID: "n1"}) assert.NoError(t, err) @@ -2839,6 +2895,47 @@ func TestGetSubscriber(t *testing.T) { }, } tests.RunUnitTest(t, e, tc) + + // Now create ICMP state + // First we need to register a gateway which can report state + _, err = configurator.CreateEntity( + "n1", + configurator.NetworkEntity{Type: orc8r.MagmadGatewayType, Key: "g1", Config: &models.MagmadGatewayConfigs{}, PhysicalID: "hw1"}, + ) + assert.NoError(t, err) + frozenClock := int64(1000000) + clock.SetAndFreezeClock(t, time.Unix(frozenClock, 0)) + defer clock.UnfreezeClock(t) + icmpStatus := &lteModels.IcmpStatus{LatencyMs: f32Ptr(12.34)} + ctx := test_utils.GetContextWithCertificate(t, "hw1") + test_utils.ReportState(t, ctx, lte.ICMPStateType, "IMSI1234567890", icmpStatus) + + tc = tests.Test{ + Method: "GET", + URL: testURLRoot, + Handler: getSubscriber, + ParamNames: []string{"network_id", "subscriber_id"}, + ParamValues: []string{"n1", 
"IMSI1234567890"}, + ExpectedStatus: 200, + ExpectedResult: &lteModels.Subscriber{ + ID: "IMSI1234567890", + Lte: &lteModels.LteSubscription{ + AuthAlgo: "MILENAGE", + AuthKey: []byte("\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11"), + AuthOpc: []byte("\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11"), + State: "ACTIVE", + SubProfile: "default", + }, + ActiveApns: lteModels.ApnList{apn2, apn1}, + Monitoring: &lteModels.SubscriberStatus{ + Icmp: &lteModels.IcmpStatus{ + LastReportedTime: frozenClock, + LatencyMs: f32Ptr(12.34), + }, + }, + }, + } + tests.RunUnitTest(t, e, tc) } func TestUpdateSubscriber(t *testing.T) { @@ -3723,3 +3820,7 @@ func newDefaultGatewayConfig() *lteModels.GatewayCellularConfigs { }, } } + +func f32Ptr(f float32) *float32 { + return &f +} diff --git a/lte/cloud/go/plugin/models/conversion.go b/lte/cloud/go/plugin/models/conversion.go index a59b8832cacd..cb6c5f5579fd 100644 --- a/lte/cloud/go/plugin/models/conversion.go +++ b/lte/cloud/go/plugin/models/conversion.go @@ -13,6 +13,7 @@ import ( "fmt" "log" "sort" + "time" "magma/lte/cloud/go/lte" "magma/lte/cloud/go/protos" @@ -21,6 +22,7 @@ import ( "magma/orc8r/cloud/go/pluginimpl/handlers" orc8rModels "magma/orc8r/cloud/go/pluginimpl/models" "magma/orc8r/cloud/go/services/configurator" + "magma/orc8r/cloud/go/services/state" "magma/orc8r/cloud/go/storage" merrors "magma/orc8r/lib/go/errors" orc8rProtos "magma/orc8r/lib/go/protos" @@ -28,6 +30,7 @@ import ( "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" + "github.com/golang/glog" "github.com/thoas/go-funk" ) @@ -446,7 +449,7 @@ func (m *Enodeb) ToEntityUpdateCriteria() configurator.EntityUpdateCriteria { } } -func (m *Subscriber) FromBackendModels(ent configurator.NetworkEntity) *Subscriber { +func (m *Subscriber) FromBackendModels(ent configurator.NetworkEntity, statesByType map[string]state.State) *Subscriber { m.ID = SubscriberID(ent.Key) m.Lte = 
ent.Config.(*LteSubscription) // If no profile in backend, return "default" @@ -458,6 +461,22 @@ func (m *Subscriber) FromBackendModels(ent configurator.NetworkEntity) *Subscrib m.ActiveApns = append(m.ActiveApns, tk.Key) } } + + if !funk.IsEmpty(statesByType) { + m.Monitoring = &SubscriberStatus{} + } + + for stateType, stateVal := range statesByType { + switch stateType { + case lte.ICMPStateType: + reportedState := stateVal.ReportedState.(*IcmpStatus) + // reported time is unix timestamp in seconds, so divide ms by 1k + reportedState.LastReportedTime = int64(stateVal.TimeMs / uint64(time.Second/time.Millisecond)) + m.Monitoring.Icmp = reportedState + default: + glog.Errorf("Loaded unrecognized subscriber state type %s", stateType) + } + } return m } diff --git a/lte/cloud/go/plugin/models/icmp_status_swaggergen.go b/lte/cloud/go/plugin/models/icmp_status_swaggergen.go index e6ca1d3ab09a..41fff43040e3 100644 --- a/lte/cloud/go/plugin/models/icmp_status_swaggergen.go +++ b/lte/cloud/go/plugin/models/icmp_status_swaggergen.go @@ -18,8 +18,7 @@ import ( type IcmpStatus struct { // last reported time - // Required: true - LastReportedTime *int64 `json:"last_reported_time"` + LastReportedTime int64 `json:"last_reported_time,omitempty"` // latency ms // Required: true @@ -30,10 +29,6 @@ type IcmpStatus struct { func (m *IcmpStatus) Validate(formats strfmt.Registry) error { var res []error - if err := m.validateLastReportedTime(formats); err != nil { - res = append(res, err) - } - if err := m.validateLatencyMs(formats); err != nil { res = append(res, err) } @@ -44,15 +39,6 @@ func (m *IcmpStatus) Validate(formats strfmt.Registry) error { return nil } -func (m *IcmpStatus) validateLastReportedTime(formats strfmt.Registry) error { - - if err := validate.Required("last_reported_time", "body", m.LastReportedTime); err != nil { - return err - } - - return nil -} - func (m *IcmpStatus) validateLatencyMs(formats strfmt.Registry) error { if err := 
validate.Required("latency_ms", "body", m.LatencyMs); err != nil { diff --git a/lte/cloud/go/plugin/models/swagger.v1.yml b/lte/cloud/go/plugin/models/swagger.v1.yml index 16bf5137ff31..9c0082072a8d 100644 --- a/lte/cloud/go/plugin/models/swagger.v1.yml +++ b/lte/cloud/go/plugin/models/swagger.v1.yml @@ -2420,7 +2420,6 @@ definitions: description: ICMP status of a subscriber device required: - latency_ms - - last_reported_time properties: latency_ms: type: number diff --git a/lte/cloud/go/plugin/models/validate.go b/lte/cloud/go/plugin/models/validate.go index 9b3926e9c1db..1ac700ba8099 100644 --- a/lte/cloud/go/plugin/models/validate.go +++ b/lte/cloud/go/plugin/models/validate.go @@ -345,3 +345,10 @@ func (m *Apn) ValidateModel() error { } return nil } + +func (m *IcmpStatus) ValidateModel() error { + if err := m.Validate(strfmt.Default); err != nil { + return err + } + return nil +} diff --git a/lte/cloud/go/plugin/plugin.go b/lte/cloud/go/plugin/plugin.go index 70ab13c019a5..9cf14568403a 100644 --- a/lte/cloud/go/plugin/plugin.go +++ b/lte/cloud/go/plugin/plugin.go @@ -46,6 +46,7 @@ func (*LteOrchestratorPlugin) GetServices() []registry.ServiceLocation { func (*LteOrchestratorPlugin) GetSerdes() []serde.Serde { return []serde.Serde{ state.NewStateSerde(lte.EnodebStateType, &lteModels.EnodebState{}), + state.NewStateSerde(lte.ICMPStateType, &lteModels.IcmpStatus{}), // Configurator serdes configurator.NewNetworkConfigSerde(lte.CellularNetworkType, &lteModels.NetworkCellularConfigs{}), diff --git a/lte/gateway/Makefile b/lte/gateway/Makefile index bca7ab73091e..2962b99f0e3c 100644 --- a/lte/gateway/Makefile +++ b/lte/gateway/Makefile @@ -1,4 +1,4 @@ -.PHONY: all build test clean run status +.PHONY: all build clean help log logs run status test GATEWAY_C_DIR = $(MAGMA_ROOT)/lte/gateway/c GRPC_CPP_PLUGIN_PATH ?= `which grpc_cpp_plugin` @@ -10,35 +10,46 @@ TEST_FLAG = -DBUILD_TESTS=1 all: build -build: build_python build_common build_oai build_sctpd build_session_manager 
+build: build_python build_common build_oai build_sctpd build_session_manager ## Build all -test: test_python test_oai test_session_manager +test: test_python test_oai test_session_manager ## Run all tests # Requires install of afl from http://lcamtuf.coredump.cx/afl/ -fuzz: build_fuzz +fuzz: build_fuzz ## Fuzz test mkdir -p ~/fuzz/input ~/fuzz/output && \ echo "testtest" > ~/fuzz/input/sample1 && \ afl-fuzz -i ~/fuzz/input/ -o ~/fuzz/output/ -m 300M ~/build/c/oai_fuzz/oai_fuzz/oai_fuzz nas -clean: clean_python +clean: clean_python ## Clean all builds rm -rf $(C_BUILD) -clean_python: +clean_python: ## Clean Python-only builds make -C $(MAGMA_ROOT)/lte/gateway/python clean -start: +start: ## Start all services sudo service magma@magmad start -stop: +stop: ## Stop all services sudo service magma@* stop -restart: stop start +restart: stop start ## Restart all services -run: build restart +run: build restart ## Build and run all services -status: +status: ## Status of all services sudo service magma@* status +log: ## Follow logs for magmad service + sudo journalctl -fu magma@magmad | egrep 'error|$$' -i --color + +logs: ## Follow logs for all services + sudo journalctl -fu magma@* | egrep 'error|$$' -i --color + +# Ref: https://marmelab.com/blog/2016/02/29/auto-documented-makefile.html +help: ## Show documented commands + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-25s\033[0m %s\n", $$1, $$2}' + + # run_cmake BUILD_DIRECTORY, FILE_DIRECTORY, FLAGS, ENV define run_cmake mkdir -p $(1) @@ -63,23 +74,23 @@ $(call run_cmake, $(1), $(2), $(3) $(TEST_FLAG)) cd $(1) && ctest --output-on-failure endef -build_python: stop +build_python: stop ## Build Python environment make -C $(MAGMA_ROOT)/lte/gateway/python buildenv -build_common: +build_common: ## Build shared libraries $(call run_cmake, $(C_BUILD)/magma_common, $(MAGMA_ROOT)/orc8r/gateway/c/common, ) -build_oai: build_common +build_oai: build_common ## Build 
OAI $(call run_cmake, $(C_BUILD)/oai, $(GATEWAY_C_DIR)/oai, $(OAI_FLAGS)) -build_sctpd: build_common +build_sctpd: build_common ## Build SCTPD $(call run_cmake, $(C_BUILD)/sctpd, $(GATEWAY_C_DIR)/sctpd, ) # Requires install of afl from http://lcamtuf.coredump.cx/afl/ -build_fuzz: +build_fuzz: ## Build fuzz tool $(call run_cmake, $(C_BUILD)/oai_fuzz, $(GATEWAY_C_DIR)/oai, $(FUZZ_FLAGS), CC=afl-gcc CXX=afl-g++ AFL_USE_ASAN=1) -build_session_manager: build_common +build_session_manager: build_common ## Build session manager $(call run_cmake, $(C_BUILD)/session_manager, $(GATEWAY_C_DIR)/session_manager, ) # Catch all for c services that don't have custom flags @@ -87,17 +98,17 @@ build_session_manager: build_common build_%: $(call run_cmake, $(C_BUILD)/$*, $(MAGMA_ROOT)/c/$*, ) -scan_oai: +scan_oai: ## Scan OAI $(call run_scanbuild, $(C_BUILD)/scan/oai, $(GATEWAY_C_DIR)/oai, $(OAI_FLAGS)) -format_oai: +format_oai: ## Format OAI find $(GATEWAY_C_DIR)/oai \( -iname "*.c" -o -iname "*.cpp" -o -iname "*.h" \) -exec \ clang-format --style=file -i {} \; -test_python: stop +test_python: stop ## Run all Python-specific tests make -C $(MAGMA_ROOT)/lte/gateway/python test_all -test_oai: build_common +test_oai: build_common ## Run all OAI-specific tests $(call run_ctest, $(C_BUILD)/oai, $(GATEWAY_C_DIR)/oai, $(OAI_FLAGS)) # Catch all for c service tests @@ -116,7 +127,7 @@ COV_HTML_DIR_TOTAL = $(MAGMA_ROOT)/c/code_coverage SLEEP_SECS = 10 -coverage: +coverage: ## Generate full code coverage report # Remove any previous gcov output files rm -f `find $(OAI_BUILD) -name *.gcda` rm -f `find $(C_BUILD)/session_manager -name *.gcda` @@ -137,7 +148,7 @@ coverage: genhtml $(COV_OUTPUT_TOTAL) --output-directory $(COV_HTML_DIR_TOTAL) @echo "Generated coverage output to $(COV_HTML_DIR_TOTAL)/index.html" -coverage_oai: +coverage_oai: ## Generate code coverage report for OAI rm -f `find $(OAI_BUILD) -name *.gcda` sudo pkill -USR1 mme @echo "Waiting for $(SLEEP_SECS) for gcov to write files" 
@@ -147,7 +158,7 @@ coverage_oai: genhtml $(COV_OUTPUT_OAI) --output-directory $(COV_HTML_DIR_OAI) @echo "Generated coverage output to $(COV_HTML_DIR_OAI)/index.html" -coverage_sm: +coverage_sm: ## Generate code coverage report for session manager rm -f `find $(C_BUILD)/session_manager -name *.gcda` sudo pkill -INT sessiond sleep $(SLEEP_SECS) @@ -156,7 +167,6 @@ coverage_sm: genhtml $(COV_OUTPUT_SM) --output-directory $(COV_HTML_DIR_SM) @echo "Generated coverage output to $(COV_HTML_DIR_SM)/index.html" -code_stats: - # Get line counts for entire magma project. Exclude auto-generated code. +code_stats: ## Generate lines-of-code statistics for magma project sudo apt-get install -y cloc - cloc . --exclude-dir=cloud/go/src/magma/obsidian/models + cloc . diff --git a/lte/gateway/c/oai/include/sgw_messages_def.h b/lte/gateway/c/oai/include/gx_messages_def.h similarity index 87% rename from lte/gateway/c/oai/include/sgw_messages_def.h rename to lte/gateway/c/oai/include/gx_messages_def.h index b48e8943824a..e24928ecff0f 100644 --- a/lte/gateway/c/oai/include/sgw_messages_def.h +++ b/lte/gateway/c/oai/include/gx_messages_def.h @@ -28,13 +28,13 @@ */ MESSAGE_DEF( - PGW_NW_INITIATED_ACTIVATE_BEARER_REQ, + GX_NW_INITIATED_ACTIVATE_BEARER_REQ, MESSAGE_PRIORITY_MED, - itti_pgw_nw_init_actv_bearer_request_t, - pgw_nw_init_actv_bearer_request) + itti_gx_nw_init_actv_bearer_request_t, + gx_nw_init_actv_bearer_request) MESSAGE_DEF( - PGW_NW_INITIATED_DEACTIVATE_BEARER_REQ, + GX_NW_INITIATED_DEACTIVATE_BEARER_REQ, MESSAGE_PRIORITY_MED, - itti_pgw_nw_init_deactv_bearer_request_t, - pgw_nw_init_deactv_bearer_request) + itti_gx_nw_init_deactv_bearer_request_t, + gx_nw_init_deactv_bearer_request) diff --git a/lte/gateway/c/oai/include/sgw_messages_types.h b/lte/gateway/c/oai/include/gx_messages_types.h similarity index 79% rename from lte/gateway/c/oai/include/sgw_messages_types.h rename to lte/gateway/c/oai/include/gx_messages_types.h index d5ec86cc85aa..753665537f91 100644 --- 
a/lte/gateway/c/oai/include/sgw_messages_types.h +++ b/lte/gateway/c/oai/include/gx_messages_types.h @@ -27,7 +27,7 @@ * either expressed or implied, of the FreeBSD Project. */ -/*! \file sgw_messages_types.h +/*! \file gx_messages_types.h * \brief S11 definitions for interaction between MME and S11 * 3GPP TS 29.274. * Messages are the same as for GTPv2-C but here we abstract the UDP layer @@ -36,29 +36,29 @@ * \version 0.1 */ -#ifndef FILE_SGW_MESSAGES_TYPES_SEEN -#define FILE_SGW_MESSAGES_TYPES_SEEN +#ifndef FILE_GX_MESSAGES_TYPES_SEEN +#define FILE_GX_MESSAGES_TYPES_SEEN -#define PGW_NW_INITIATED_ACTIVATE_BEARER_REQ(mSGpTR) \ - (mSGpTR)->ittiMsg.pgw_nw_init_actv_bearer_request -#define PGW_NW_INITIATED_DEACTIVATE_BEARER_REQ(mSGpTR) \ - (mSGpTR)->ittiMsg.pgw_nw_init_deactv_bearer_request +#define GX_NW_INITIATED_ACTIVATE_BEARER_REQ(mSGpTR) \ + (mSGpTR)->ittiMsg.gx_nw_init_actv_bearer_request +#define GX_NW_INITIATED_DEACTIVATE_BEARER_REQ(mSGpTR) \ + (mSGpTR)->ittiMsg.gx_nw_init_deactv_bearer_request -typedef struct itti_pgw_nw_init_actv_bearer_request_s { +typedef struct itti_gx_nw_init_actv_bearer_request_s { char imsi[IMSI_BCD_DIGITS_MAX + 1]; uint8_t imsi_length; ebi_t lbi; traffic_flow_template_t ul_tft; traffic_flow_template_t dl_tft; bearer_qos_t eps_bearer_qos; -} itti_pgw_nw_init_actv_bearer_request_t; +} itti_gx_nw_init_actv_bearer_request_t; -typedef struct itti_pgw_nw_init_deactv_bearer_request_s { +typedef struct itti_gx_nw_init_deactv_bearer_request_s { char imsi[IMSI_BCD_DIGITS_MAX + 1]; uint8_t imsi_length; uint32_t no_of_bearers; ebi_t lbi; ebi_t ebi[BEARERS_PER_UE]; -} itti_pgw_nw_init_deactv_bearer_request_t; +} itti_gx_nw_init_deactv_bearer_request_t; -#endif /* FILE_SGW_MESSAGES_TYPES_SEEN */ +#endif /* FILE_GX_MESSAGES_TYPES_SEEN */ diff --git a/lte/gateway/c/oai/include/messages_def.h b/lte/gateway/c/oai/include/messages_def.h index a570d86c451a..710835f79d05 100644 --- a/lte/gateway/c/oai/include/messages_def.h +++ 
b/lte/gateway/c/oai/include/messages_def.h @@ -37,8 +37,7 @@ #include "s1ap_messages_def.h" #include "s6a_messages_def.h" #include "sctp_messages_def.h" -#include "sgw_messages_def.h" -#include "s5_messages_def.h" +#include "gx_messages_def.h" #include "mme_app_messages_def.h" #include "service303_messages_def.h" #include "sgs_messages_def.h" diff --git a/lte/gateway/c/oai/include/messages_types.h b/lte/gateway/c/oai/include/messages_types.h index 7689e31d74b6..8011cb6e3ea9 100644 --- a/lte/gateway/c/oai/include/messages_types.h +++ b/lte/gateway/c/oai/include/messages_types.h @@ -38,11 +38,10 @@ #include "gtpv1_u_messages_types.h" #include "ip_forward_messages_types.h" #include "s11_messages_types.h" -#include "s5_messages_types.h" #include "s1ap_messages_types.h" #include "s6a_messages_types.h" #include "sctp_messages_types.h" -#include "sgw_messages_types.h" +#include "gx_messages_types.h" #include "mme_app_messages_types.h" #include "service303_messages_types.h" #include "sgs_messages_types.h" diff --git a/lte/gateway/c/oai/include/s5_messages_def.h b/lte/gateway/c/oai/include/s5_messages_def.h deleted file mode 100644 index 8db1ff4eefa6..000000000000 --- a/lte/gateway/c/oai/include/s5_messages_def.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2015, EURECOM (www.eurecom.fr) - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * The views and conclusions contained in the software and documentation are those - * of the authors and should not be interpreted as representing official policies, - * either expressed or implied, of the FreeBSD Project. - */ -//WARNING: Do not include this header directly. Use intertask_interface.h instead. 
- -MESSAGE_DEF( - S5_NW_INITIATED_ACTIVATE_BEARER_REQ, - MESSAGE_PRIORITY_MED, - itti_s5_nw_init_actv_bearer_request_t, - s5_nw_init_actv_bearer_request) -MESSAGE_DEF( - S5_NW_INITIATED_ACTIVATE_BEARER_RESP, - MESSAGE_PRIORITY_MED, - itti_s5_nw_init_actv_bearer_rsp_t, - s5_nw_init_actv_bearer_response) -MESSAGE_DEF( - S5_NW_INITIATED_DEACTIVATE_BEARER_REQ, - MESSAGE_PRIORITY_MED, - itti_s5_nw_init_deactv_bearer_request_t, - s5_nw_init_deactv_bearer_request) -MESSAGE_DEF( - S5_NW_INITIATED_DEACTIVATE_BEARER_RESP, - MESSAGE_PRIORITY_MED, - itti_s5_nw_init_deactv_bearer_rsp_t, - s5_nw_init_deactv_bearer_response) diff --git a/lte/gateway/c/oai/include/s5_messages_types.h b/lte/gateway/c/oai/include/s5_messages_types.h deleted file mode 100644 index 55523e04bba7..000000000000 --- a/lte/gateway/c/oai/include/s5_messages_types.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright (c) 2015, EURECOM (www.eurecom.fr) - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, this - * list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * The views and conclusions contained in the software and documentation are those - * of the authors and should not be interpreted as representing official policies, - * either expressed or implied, of the FreeBSD Project. - */ -#ifndef FILE_S5_MESSAGES_TYPES_SEEN -#define FILE_S5_MESSAGES_TYPES_SEEN - -#include "sgw_ie_defs.h" - -#define S5_NW_INITIATED_ACTIVATE_BEARER_REQ(mSGpTR) \ - (mSGpTR)->ittiMsg.s5_nw_init_actv_bearer_request -#define S5_NW_INITIATED_ACTIVATE_BEARER_RESP(mSGpTR) \ - (mSGpTR)->ittiMsg.s5_nw_init_actv_bearer_response -#define S5_NW_INITIATED_DEACTIVATE_BEARER_REQ(mSGpTR) \ - (mSGpTR)->ittiMsg.s5_nw_init_deactv_bearer_request -#define S5_NW_INITIATED_DEACTIVATE_BEARER_RESP(mSGpTR) \ - (mSGpTR)->ittiMsg.s5_nw_init_deactv_bearer_response - -typedef struct itti_s5_nw_init_actv_bearer_request_s { - ebi_t lbi;///< linked Bearer ID - teid_t mme_teid_S11; - teid_t s_gw_teid_S11_S4; - bearer_qos_t eps_bearer_qos; ///< Bearer QoS - traffic_flow_template_t ul_tft; ///< UL TFT will be sent to UE - traffic_flow_template_t dl_tft; ///< DL TFT will be stored at SPGW - protocol_configuration_options_t pco; ///< PCO protocol_configuration_options -} itti_s5_nw_init_actv_bearer_request_t; - -typedef struct itti_s5_nw_init_actv_bearer_rsp_s { - gtpv2c_cause_value_t cause; - Imsi_t imsi; - ebi_t lbi; - ebi_t ebi; ///sid().id(); // If north bound is sessiond itself, IMSI prefix is used; // in S1AP tests, IMSI prefix is not used 
@@ -197,7 +197,7 @@ Status SpgwServiceImpl::DeleteBearer( DeleteBearerResult* response) { OAILOG_INFO(LOG_UTIL, "Received DeleteBearer GRPC request\n"); - itti_pgw_nw_init_deactv_bearer_request_t itti_msg; + itti_gx_nw_init_deactv_bearer_request_t itti_msg; itti_msg.imsi_length = request->sid().id().size(); strcpy(itti_msg.imsi, request->sid().id().c_str()); itti_msg.lbi = request->link_bearer_id(); diff --git a/lte/gateway/c/oai/tasks/grpc_service/spgw_service_handler.c b/lte/gateway/c/oai/tasks/grpc_service/spgw_service_handler.c index b044792d9039..6073b70fd574 100644 --- a/lte/gateway/c/oai/tasks/grpc_service/spgw_service_handler.c +++ b/lte/gateway/c/oai/tasks/grpc_service/spgw_service_handler.c @@ -27,30 +27,30 @@ #include "intertask_interface_types.h" #include "itti_types.h" #include "log.h" -#include "sgw_messages_types.h" +#include "gx_messages_types.h" int send_activate_bearer_request_itti( - itti_pgw_nw_init_actv_bearer_request_t *itti_msg) + itti_gx_nw_init_actv_bearer_request_t* itti_msg) { - OAILOG_DEBUG(LOG_SPGW_APP, "Sending pgw_nw_init_actv_bearer_request\n"); - MessageDef *message_p = itti_alloc_new_message( - TASK_GRPC_SERVICE, PGW_NW_INITIATED_ACTIVATE_BEARER_REQ); - message_p->ittiMsg.pgw_nw_init_actv_bearer_request = *itti_msg; + OAILOG_DEBUG(LOG_SPGW_APP, "Sending nw_init_actv_bearer_request to SPGW \n"); + MessageDef* message_p = itti_alloc_new_message( + TASK_GRPC_SERVICE, GX_NW_INITIATED_ACTIVATE_BEARER_REQ); + message_p->ittiMsg.gx_nw_init_actv_bearer_request = *itti_msg; IMSI_STRING_TO_IMSI64((char*) itti_msg->imsi, &message_p->ittiMsgHeader.imsi); - return itti_send_msg_to_task(TASK_PGW_APP, INSTANCE_DEFAULT, message_p); + return itti_send_msg_to_task(TASK_SPGW_APP, INSTANCE_DEFAULT, message_p); } int send_deactivate_bearer_request_itti( - itti_pgw_nw_init_deactv_bearer_request_t* itti_msg) + itti_gx_nw_init_deactv_bearer_request_t* itti_msg) { - OAILOG_DEBUG(LOG_SPGW_APP, "Sending pgw_nw_init_deactv_bearer_request\n"); + 
OAILOG_DEBUG(LOG_SPGW_APP, "Sending spgw_nw_init_deactv_bearer_request\n"); MessageDef* message_p = itti_alloc_new_message( - TASK_GRPC_SERVICE, PGW_NW_INITIATED_DEACTIVATE_BEARER_REQ); - message_p->ittiMsg.pgw_nw_init_deactv_bearer_request = *itti_msg; + TASK_GRPC_SERVICE, GX_NW_INITIATED_DEACTIVATE_BEARER_REQ); + message_p->ittiMsg.gx_nw_init_deactv_bearer_request = *itti_msg; IMSI_STRING_TO_IMSI64((char*) itti_msg->imsi, &message_p->ittiMsgHeader.imsi); - return itti_send_msg_to_task(TASK_PGW_APP, INSTANCE_DEFAULT, message_p); + return itti_send_msg_to_task(TASK_SPGW_APP, INSTANCE_DEFAULT, message_p); } diff --git a/lte/gateway/c/oai/tasks/mme_app/mme_app_embedded_spgw.h b/lte/gateway/c/oai/tasks/mme_app/mme_app_embedded_spgw.h index 7cc93fb4190c..d083e5407145 100644 --- a/lte/gateway/c/oai/tasks/mme_app/mme_app_embedded_spgw.h +++ b/lte/gateway/c/oai/tasks/mme_app/mme_app_embedded_spgw.h @@ -23,7 +23,6 @@ #include "mme_config.h" #include "spgw_config.h" #include "sgw_defs.h" -#include "pgw_defs.h" int mme_config_embedded_spgw_parse_opt_line( int argc, diff --git a/lte/gateway/c/oai/tasks/mme_app/scribe_log_helper.c b/lte/gateway/c/oai/tasks/mme_app/scribe_log_helper.c deleted file mode 100644 index 4274eaac12eb..000000000000 --- a/lte/gateway/c/oai/tasks/mme_app/scribe_log_helper.c +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The OpenAirInterface Software Alliance licenses this file to You under - * the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *------------------------------------------------------------------------------- - * For more information about the OpenAirInterface (OAI) Software Alliance: - * contact@openairinterface.org - */ - -#include "conversions.h" -#include "common_types.h" -#include "log.h" -#include "scribe_rpc_client.h" - -void log_ue_state_to_scribe( - imsi64_t imsi, - uint8_t imsi_len, - const char *ue_status) -{ - char imsi_str[16]; - IMSI64_TO_STRING(imsi, imsi_str, imsi_len); - scribe_string_param_t str_params[] = { - {"ue_status", ue_status}, - {"imsi", imsi_str}, - }; - char const *category = "perfpipe_magma_ue_stats"; - int status = log_to_scribe(category, NULL, 0, str_params, 2); - if (status != 0) { - OAILOG_ERROR( - LOG_MME_APP, - "Failed to log to scribe category %s, log status: %d \n", - category, - status); - } -} diff --git a/lte/gateway/c/oai/tasks/mme_app/scribe_log_helper.h b/lte/gateway/c/oai/tasks/mme_app/scribe_log_helper.h deleted file mode 100644 index fd1949b4e912..000000000000 --- a/lte/gateway/c/oai/tasks/mme_app/scribe_log_helper.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The OpenAirInterface Software Alliance licenses this file to You under - * the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *------------------------------------------------------------------------------- - * For more information about the OpenAirInterface (OAI) Software Alliance: - * contact@openairinterface.org - */ - -#ifndef SCRIBE_LOG_HELPER_H -#define SCRIBE_LOG_HELPER_H -#include "common_types.h" - -// helper function to log ue status in OAI. -void log_ue_state_to_scribe( - imsi64_t imsi, - uint8_t imsi_len, - const char *ue_status); - -#endif /* SCRIBE_LOG_HELPER_H */ diff --git a/lte/gateway/c/oai/tasks/sgw/CMakeLists.txt b/lte/gateway/c/oai/tasks/sgw/CMakeLists.txt index 5f15b54e5b8e..994d3838072c 100644 --- a/lte/gateway/c/oai/tasks/sgw/CMakeLists.txt +++ b/lte/gateway/c/oai/tasks/sgw/CMakeLists.txt @@ -22,7 +22,6 @@ add_library(TASK_SGW s11_causes.c sgw_task.c sgw_handlers.c - pgw_task.c pgw_handlers.c sgw_context_manager.c pgw_pco.c diff --git a/lte/gateway/c/oai/tasks/sgw/pgw_defs.h b/lte/gateway/c/oai/tasks/sgw/pgw_defs.h deleted file mode 100644 index 51aef9683838..000000000000 --- a/lte/gateway/c/oai/tasks/sgw/pgw_defs.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The OpenAirInterface Software Alliance licenses this file to You under - * the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *------------------------------------------------------------------------------- - * For more information about the OpenAirInterface (OAI) Software Alliance: - * contact@openairinterface.org - */ - -#ifndef FILE_PGW_DEFS_SEEN -#define FILE_PGW_DEFS_SEEN -#include "spgw_config.h" -int pgw_init(spgw_config_t *spgw_config_pP); - -#endif /* FILE_PGW_DEFS_SEEN */ diff --git a/lte/gateway/c/oai/tasks/sgw/pgw_handlers.c b/lte/gateway/c/oai/tasks/sgw/pgw_handlers.c index d4ebb21b08b9..ce621b53e786 100644 --- a/lte/gateway/c/oai/tasks/sgw/pgw_handlers.c +++ b/lte/gateway/c/oai/tasks/sgw/pgw_handlers.c @@ -39,8 +39,10 @@ #include "log.h" #include "spgw_config.h" #include "pgw_pco.h" +#include "dynamic_memory_check.h" #include "pgw_ue_ip_address_alloc.h" #include "pgw_handlers.h" +#include "sgw_handlers.h" #include "pcef_handlers.h" #include "common_defs.h" #include "3gpp_23.003.h" @@ -57,15 +59,42 @@ #include "service303.h" #include "sgw_context_manager.h" #include "sgw_ie_defs.h" +#include "pgw_procedures.h" -static void get_session_req_data( - spgw_state_t *spgw_state, - const itti_s11_create_session_request_t *saved_req, - struct pcef_create_session_data *data); -static char convert_digit_to_char(char digit); extern spgw_config_t spgw_config; -extern uint32_t sgw_get_new_s1u_teid(void); extern void print_bearer_ids_helper(const ebi_t*, uint32_t); + +static void _get_session_req_data( + spgw_state_t* spgw_state, + const itti_s11_create_session_request_t* saved_req, + struct pcef_create_session_data* data); + +static char _convert_digit_to_char(char 
digit); + +static int _spgw_build_and_send_s11_create_bearer_request( + s_plus_p_gw_eps_bearer_context_information_t* spgw_ctxt_p, + const itti_gx_nw_init_actv_bearer_request_t* const bearer_req_p, + spgw_state_t* spgw_state, + teid_t s1_u_sgw_fteid); + +static int _create_temporary_dedicated_bearer_context( + s_plus_p_gw_eps_bearer_context_information_t* spgw_ctxt_p, + const itti_gx_nw_init_actv_bearer_request_t* const bearer_req_p, + spgw_state_t* spgw_state, + teid_t s1_u_sgw_fteid); + +static void _delete_temporary_dedicated_bearer_context( + teid_t s1_u_sgw_fteid, + ebi_t lbi, + s_plus_p_gw_eps_bearer_context_information_t* spgw_context_p); + +static int32_t _spgw_build_and_send_s11_deactivate_bearer_req( + imsi64_t imsi64, + uint8_t no_of_bearers_to_be_deact, + ebi_t* ebi_to_be_deactivated, + bool delete_default_bearer, + teid_t mme_teid_S11); + //-------------------------------------------------------------------------------- void handle_s5_create_session_request( @@ -219,18 +248,18 @@ void handle_s5_create_session_request( } if (sgi_create_endpoint_resp.status == SGI_STATUS_OK) { // create session in PCEF and return - s5_create_session_request_t bearer_req = {0}; - bearer_req.context_teid = context_teid; - bearer_req.eps_bearer_id = eps_bearer_id; + s5_create_session_request_t session_req = {0}; + session_req.context_teid = context_teid; + session_req.eps_bearer_id = eps_bearer_id; char ip_str[INET_ADDRSTRLEN]; inet_ntop(AF_INET, &(inaddr.s_addr), ip_str, INET_ADDRSTRLEN); struct pcef_create_session_data session_data; - get_session_req_data( + _get_session_req_data( spgw_state, &new_bearer_ctxt_info_p->sgw_eps_bearer_context_information.saved_message, &session_data); pcef_create_session( - imsi, ip_str, &session_data, sgi_create_endpoint_resp, bearer_req); + imsi, ip_str, &session_data, sgi_create_endpoint_resp, session_req); OAILOG_FUNC_OUT(LOG_PGW_APP); } err: @@ -283,7 +312,7 @@ static int get_imeisv_from_session_req( * else if they are in [48,57] keep 
them the same * else log an error and return '0'=48 value */ -static char convert_digit_to_char(char digit) +static char _convert_digit_to_char(char digit) { if ((digit >= 0) && (digit <= 9)) { return (digit + '0'); @@ -302,14 +331,15 @@ static void get_plmn_from_session_req( const itti_s11_create_session_request_t* saved_req, struct pcef_create_session_data* data) { - data->mcc_mnc[0] = convert_digit_to_char(saved_req->serving_network.mcc[0]); - data->mcc_mnc[1] = convert_digit_to_char(saved_req->serving_network.mcc[1]); - data->mcc_mnc[2] = convert_digit_to_char(saved_req->serving_network.mcc[2]); - data->mcc_mnc[3] = convert_digit_to_char(saved_req->serving_network.mnc[0]); - data->mcc_mnc[4] = convert_digit_to_char(saved_req->serving_network.mnc[1]); + data->mcc_mnc[0] = _convert_digit_to_char(saved_req->serving_network.mcc[0]); + data->mcc_mnc[1] = _convert_digit_to_char(saved_req->serving_network.mcc[1]); + data->mcc_mnc[2] = _convert_digit_to_char(saved_req->serving_network.mcc[2]); + data->mcc_mnc[3] = _convert_digit_to_char(saved_req->serving_network.mnc[0]); + data->mcc_mnc[4] = _convert_digit_to_char(saved_req->serving_network.mnc[1]); data->mcc_mnc_len = 5; if ((saved_req->serving_network.mnc[2] & 0xf) != 0xf) { - data->mcc_mnc[5] = convert_digit_to_char(saved_req->serving_network.mnc[2]); + data->mcc_mnc[5] = + _convert_digit_to_char(saved_req->serving_network.mnc[2]); data->mcc_mnc[6] = '\0'; data->mcc_mnc_len += 1; } else { @@ -321,15 +351,15 @@ static void get_imsi_plmn_from_session_req( const itti_s11_create_session_request_t* saved_req, struct pcef_create_session_data* data) { - data->imsi_mcc_mnc[0] = convert_digit_to_char(saved_req->imsi.digit[0]); - data->imsi_mcc_mnc[1] = convert_digit_to_char(saved_req->imsi.digit[1]); - data->imsi_mcc_mnc[2] = convert_digit_to_char(saved_req->imsi.digit[2]); - data->imsi_mcc_mnc[3] = convert_digit_to_char(saved_req->imsi.digit[3]); - data->imsi_mcc_mnc[4] = convert_digit_to_char(saved_req->imsi.digit[4]); + 
data->imsi_mcc_mnc[0] = _convert_digit_to_char(saved_req->imsi.digit[0]); + data->imsi_mcc_mnc[1] = _convert_digit_to_char(saved_req->imsi.digit[1]); + data->imsi_mcc_mnc[2] = _convert_digit_to_char(saved_req->imsi.digit[2]); + data->imsi_mcc_mnc[3] = _convert_digit_to_char(saved_req->imsi.digit[3]); + data->imsi_mcc_mnc[4] = _convert_digit_to_char(saved_req->imsi.digit[4]); data->imsi_mcc_mnc_len = 5; // Check if 2 or 3 digit by verifying mnc[2] has a valid value if ((saved_req->serving_network.mnc[2] & 0xf) != 0xf) { - data->imsi_mcc_mnc[5] = convert_digit_to_char(saved_req->imsi.digit[5]); + data->imsi_mcc_mnc[5] = _convert_digit_to_char(saved_req->imsi.digit[5]); data->imsi_mcc_mnc[6] = '\0'; data->imsi_mcc_mnc_len += 1; } else { @@ -392,10 +422,10 @@ static int get_msisdn_from_session_req( return len; } -static void get_session_req_data( - spgw_state_t *spgw_state, - const itti_s11_create_session_request_t *saved_req, - struct pcef_create_session_data *data) +static void _get_session_req_data( + spgw_state_t* spgw_state, + const itti_s11_create_session_request_t* saved_req, + struct pcef_create_session_data* data) { const bearer_qos_t *qos; @@ -425,67 +455,45 @@ static void get_session_req_data( data->qci = qos->qci; } -//----------------------------------------------------------------------------- - -uint32_t pgw_handle_nw_initiated_bearer_actv_req( - spgw_state_t *spgw_state, - const itti_pgw_nw_init_actv_bearer_request_t *const bearer_req_p, - imsi64_t imsi64) +/* + * Handle NW initiated Dedicated Bearer Activation from SPGW service + */ +int spgw_handle_nw_initiated_bearer_actv_req( + spgw_state_t* spgw_state, + const itti_gx_nw_init_actv_bearer_request_t* const bearer_req_p, + imsi64_t imsi64, + gtpv2c_cause_value_t* failed_cause) { OAILOG_FUNC_IN(LOG_PGW_APP); - MessageDef *message_p = NULL; uint32_t i = 0; - uint32_t rc = RETURNok; - hash_table_ts_t *hashtblP = NULL; + int rc = RETURNok; + hash_table_ts_t* hashtblP = NULL; uint32_t num_elements = 0; - 
s_plus_p_gw_eps_bearer_context_information_t *spgw_ctxt_p = NULL; - hash_node_t *node = NULL; - itti_s5_nw_init_actv_bearer_request_t *itti_s5_actv_bearer_req = NULL; + s_plus_p_gw_eps_bearer_context_information_t* spgw_ctxt_p = NULL; + hash_node_t* node = NULL; bool is_imsi_found = false; bool is_lbi_found = false; OAILOG_INFO( - LOG_PGW_APP, - "Received Create Bearer Req from PCRF with IMSI " IMSI_64_FMT, imsi64); - - message_p = - itti_alloc_new_message(TASK_SPGW_APP, S5_NW_INITIATED_ACTIVATE_BEARER_REQ); - if (message_p == NULL) { - OAILOG_ERROR( - LOG_PGW_APP, - "itti_alloc_new_message failed for" - "S5_NW_INITIATED_ACTIVATE_DEDICATED_BEARER_REQ\n"); - OAILOG_FUNC_RETURN(LOG_PGW_APP, RETURNerror); - } - itti_s5_actv_bearer_req = &message_p->ittiMsg.s5_nw_init_actv_bearer_request; - // Send ITTI message to SGW - memset( - itti_s5_actv_bearer_req, 0, sizeof(itti_s5_nw_init_actv_bearer_request_t)); - - // Copy Bearer QoS - memcpy( - &itti_s5_actv_bearer_req->eps_bearer_qos, - &bearer_req_p->eps_bearer_qos, - sizeof(bearer_qos_t)); - //Copy UL TFT to be sent to UE - memcpy( - &itti_s5_actv_bearer_req->ul_tft, - &bearer_req_p->ul_tft, - sizeof(traffic_flow_template_t)); - //Copy DL TFT. 
SGW creates a temporary bearer ctx and stores the DL TFT - memcpy( - &itti_s5_actv_bearer_req->dl_tft, - &bearer_req_p->dl_tft, - sizeof(traffic_flow_template_t)); + LOG_SPGW_APP, + "Received Create Bearer Req from PCRF with lbi:%d IMSI\n" IMSI_64_FMT, + bearer_req_p->lbi, + imsi64); hashtblP = spgw_state->sgw_state.s11_bearer_context_information; if (!hashtblP) { - OAILOG_ERROR(LOG_PGW_APP, "There is no UE Context in the SGW context \n"); - OAILOG_FUNC_RETURN(LOG_PGW_APP, RETURNerror); + OAILOG_ERROR( + LOG_SPGW_APP, "No s11_bearer_context_information hash table found \n"); + *failed_cause = REQUEST_REJECTED; + OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); } - // Fetch S11 MME TEID using IMSI and LBI - while ((num_elements < hashtblP->num_elements) && (i < hashtblP->size)) { + /* On reception of Dedicated Bearer Activation Request from PCRF, + * SPGW shall identify whether valid PDN session exists for the UE + * using IMSI and LBI, for which Dedicated Bearer Activation is requested. + */ + while ((num_elements < hashtblP->num_elements) && (i < hashtblP->size) && + (!is_lbi_found)) { pthread_mutex_lock(&hashtblP->lock_nodes[i]); if (hashtblP->nodes[i] != NULL) { node = hashtblP->nodes[i]; @@ -506,11 +514,6 @@ uint32_t pgw_handle_nw_initiated_bearer_actv_req( spgw_ctxt_p->sgw_eps_bearer_context_information.pdn_connection .default_bearer == bearer_req_p->lbi) { is_lbi_found = true; - itti_s5_actv_bearer_req->lbi = bearer_req_p->lbi; - itti_s5_actv_bearer_req->mme_teid_S11 = - spgw_ctxt_p->sgw_eps_bearer_context_information.mme_teid_S11; - itti_s5_actv_bearer_req->s_gw_teid_S11_S4 = - spgw_ctxt_p->sgw_eps_bearer_context_information.s_gw_teid_S11_S4; break; } } @@ -522,51 +525,57 @@ uint32_t pgw_handle_nw_initiated_bearer_actv_req( if ((!is_imsi_found) || (!is_lbi_found)) { OAILOG_INFO( - LOG_PGW_APP, + LOG_SPGW_APP, "is_imsi_found (%d), is_lbi_found (%d)\n", is_imsi_found, is_lbi_found); OAILOG_ERROR( - LOG_PGW_APP, - "Sending dedicated_bearer_actv_rsp with 
REQUEST_REJECTED " - "cause to NW\n"); - // Send Reject to PCRF - // TODO-Uncomment once implemented at PCRF - /* rc = send_dedicated_bearer_actv_rsp(bearer_req_p->lbi, - REQUEST_REJECTED);*/ - OAILOG_FUNC_RETURN(LOG_PGW_APP, RETURNerror); + LOG_SPGW_APP, + "Sending dedicated_bearer_actv_rsp with REQUEST_REJECTED cause to NW\n"); + *failed_cause = REQUEST_REJECTED; + OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); } - // Send S5_ACTIVATE_DEDICATED_BEARER_REQ to SGW APP - OAILOG_INFO( - LOG_PGW_APP, - "LBI for the received Create Bearer Req %d\n", - itti_s5_actv_bearer_req->lbi); - OAILOG_INFO( - LOG_PGW_APP, - "Sending S5_ACTIVATE_DEDICATED_BEARER_REQ to SGW with MME TEID %d\n", - itti_s5_actv_bearer_req->mme_teid_S11); - - message_p->ittiMsgHeader.imsi = imsi64; - - rc = itti_send_msg_to_task(TASK_SPGW_APP, INSTANCE_DEFAULT, message_p); - OAILOG_FUNC_RETURN(LOG_PGW_APP, rc); + teid_t s1_u_sgw_fteid = sgw_get_new_s1u_teid(spgw_state); + // Create temporary dedicated bearer context + rc = _create_temporary_dedicated_bearer_context( + spgw_ctxt_p, bearer_req_p, spgw_state, s1_u_sgw_fteid); + if (rc != RETURNok) { + OAILOG_ERROR( + LOG_SPGW_APP, + "Failed to create temporary dedicated bearer context for lbi: %u \n ", + bearer_req_p->lbi); + *failed_cause = REQUEST_REJECTED; + OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); + } + // Build and send ITTI message, s11_create_bearer_request to MME APP + rc = _spgw_build_and_send_s11_create_bearer_request( + spgw_ctxt_p, bearer_req_p, spgw_state, s1_u_sgw_fteid); + if (rc != RETURNok) { + OAILOG_ERROR( + LOG_SPGW_APP, + "Failed to build and send S11 Create Bearer Request for lbi :%u \n", + bearer_req_p->lbi); + + *failed_cause = REQUEST_REJECTED; + _delete_temporary_dedicated_bearer_context( + s1_u_sgw_fteid, bearer_req_p->lbi, spgw_ctxt_p); + OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); + } + OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNok); } //------------------------------------------------------------------------------ - 
-uint32_t pgw_handle_nw_initiated_bearer_deactv_req( - spgw_state_t *spgw_state, - const itti_pgw_nw_init_deactv_bearer_request_t *const bearer_req_p, +int32_t spgw_handle_nw_initiated_bearer_deactv_req( + spgw_state_t* spgw_state, + const itti_gx_nw_init_deactv_bearer_request_t* const bearer_req_p, imsi64_t imsi64) { - uint32_t rc = RETURNok; - OAILOG_FUNC_IN(LOG_PGW_APP); - MessageDef *message_p = NULL; - hash_table_ts_t *hashtblP = NULL; + OAILOG_FUNC_IN(LOG_SPGW_APP); + int32_t rc = RETURNok; + hash_table_ts_t* hashtblP = NULL; uint32_t num_elements = 0; - s_plus_p_gw_eps_bearer_context_information_t *spgw_ctxt_p = NULL; + s_plus_p_gw_eps_bearer_context_information_t* spgw_ctxt_p = NULL; hash_node_t *node = NULL; - itti_s5_nw_init_deactv_bearer_request_t *itti_s5_deactv_ded_bearer_req = NULL; bool is_lbi_found = false; bool is_imsi_found = false; bool is_ebi_found = false; @@ -574,16 +583,17 @@ uint32_t pgw_handle_nw_initiated_bearer_deactv_req( uint32_t no_of_bearers_to_be_deact = 0; uint32_t no_of_bearers_rej = 0; ebi_t invalid_bearer_id[BEARERS_PER_UE] = {0}; - teid_t s11_mme_teid = 0; - OAILOG_INFO(LOG_PGW_APP, "Received nw_initiated_deactv_bearer_req from NW\n"); + OAILOG_INFO( + LOG_SPGW_APP, + "Received nw_initiated_deactv_bearer_req from SPGW service \n"); print_bearer_ids_helper(bearer_req_p->ebi, bearer_req_p->no_of_bearers); hashtblP = spgw_state->sgw_state.s11_bearer_context_information; if (hashtblP == NULL) { OAILOG_ERROR( - LOG_PGW_APP, "hashtblP is NULL for nw_initiated_deactv_bearer_req\n"); - OAILOG_FUNC_RETURN(LOG_PGW_APP, RETURNerror); + LOG_SPGW_APP, "No s11_bearer_context_information hash table is found\n"); + OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); } // Check if valid LBI and EBI recvd @@ -605,8 +615,6 @@ uint32_t pgw_handle_nw_initiated_bearer_deactv_req( spgw_ctxt_p->sgw_eps_bearer_context_information.imsi.digit, (const char*) bearer_req_p->imsi)) { is_imsi_found = true; - s11_mme_teid = - 
spgw_ctxt_p->sgw_eps_bearer_context_information.mme_teid_S11; if ( (bearer_req_p->lbi != 0) && (bearer_req_p->lbi == @@ -646,94 +654,298 @@ uint32_t pgw_handle_nw_initiated_bearer_deactv_req( if ((!is_ebi_found) || (!is_lbi_found) || (!is_imsi_found) || (no_of_bearers_rej > 0)) { OAILOG_INFO( - LOG_PGW_APP, + LOG_SPGW_APP, "is_imsi_found (%d), is_lbi_found (%d), is_ebi_found (%d) \n", - is_imsi_found, is_lbi_found, is_ebi_found); + is_imsi_found, + is_lbi_found, + is_ebi_found); OAILOG_ERROR( - LOG_PGW_APP, - "Sending dedicated bearer deactivation reject to NW\n"); + LOG_SPGW_APP, "Sending dedicated bearer deactivation reject to NW\n"); print_bearer_ids_helper(invalid_bearer_id, no_of_bearers_rej); // TODO-Uncomment once implemented at PCRF /* rc = send_dedicated_bearer_deactv_rsp(invalid_bearer_id, REQUEST_REJECTED);*/ } - // Send ITTI message to SGW if (no_of_bearers_to_be_deact > 0) { - message_p = itti_alloc_new_message( - TASK_SPGW_APP, S5_NW_INITIATED_DEACTIVATE_BEARER_REQ); - if (message_p == NULL) { - OAILOG_ERROR( - LOG_PGW_APP, - "itti_alloc_new_message failed for nw_initiated_deactv_bearer_req\n"); - OAILOG_FUNC_RETURN(LOG_PGW_APP, RETURNerror); - } - itti_s5_deactv_ded_bearer_req = - &message_p->ittiMsg.s5_nw_init_deactv_bearer_request; - memset( - itti_s5_deactv_ded_bearer_req, - 0, - sizeof(itti_s5_nw_init_deactv_bearer_request_t)); - - itti_s5_deactv_ded_bearer_req->s11_mme_teid = s11_mme_teid; - /* If default bearer has to be deleted then the EBI list in the received - * pgw_nw_init_deactv_bearer_request message contains a single entry at 0th - * index and LBI == bearer_req_p->ebi[0] - */ - if (bearer_req_p->lbi == bearer_req_p->ebi[0]) { - itti_s5_deactv_ded_bearer_req->delete_default_bearer = true; - } - itti_s5_deactv_ded_bearer_req->no_of_bearers = no_of_bearers_to_be_deact; - memcpy( - &itti_s5_deactv_ded_bearer_req->ebi, + bool delete_default_bearer = + (bearer_req_p->lbi == bearer_req_p->ebi[0]) ? 
true : false; + rc = _spgw_build_and_send_s11_deactivate_bearer_req( + imsi64, + no_of_bearers_to_be_deact, ebi_to_be_deactivated, - (sizeof(ebi_t) * no_of_bearers_to_be_deact)); - - message_p->ittiMsgHeader.imsi = imsi64; + delete_default_bearer, + spgw_ctxt_p->sgw_eps_bearer_context_information.mme_teid_S11); + } + OAILOG_FUNC_RETURN(LOG_SPGW_APP, rc); +} - OAILOG_INFO( - LOG_PGW_APP, - "Sending nw_initiated_deactv_bearer_req to SGW" - "with delete_default_bearer flag set to %d\n", - itti_s5_deactv_ded_bearer_req->delete_default_bearer); - rc = itti_send_msg_to_task(TASK_SPGW_APP, INSTANCE_DEFAULT, message_p); +// Send ITTI message,S11_NW_INITIATED_DEACTIVATE_BEARER_REQUEST to mme_app +static int32_t _spgw_build_and_send_s11_deactivate_bearer_req( + imsi64_t imsi64, + uint8_t no_of_bearers_to_be_deact, + ebi_t* ebi_to_be_deactivated, + bool delete_default_bearer, + teid_t mme_teid_S11) +{ + OAILOG_FUNC_IN(LOG_SPGW_APP); + MessageDef* message_p = itti_alloc_new_message( + TASK_SPGW_APP, S11_NW_INITIATED_DEACTIVATE_BEARER_REQUEST); + if (message_p == NULL) { + OAILOG_ERROR( + LOG_SPGW_APP, + "itti_alloc_new_message failed for nw_initiated_deactv_bearer_req\n"); + OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); } + itti_s11_nw_init_deactv_bearer_request_t* s11_bearer_deactv_request = + &message_p->ittiMsg.s11_nw_init_deactv_bearer_request; + memset( + s11_bearer_deactv_request, + 0, + sizeof(itti_s11_nw_init_deactv_bearer_request_t)); + + s11_bearer_deactv_request->s11_mme_teid = mme_teid_S11; + /* If default bearer has to be deleted then the EBI list in the received + * pgw_nw_init_deactv_bearer_request message contains a single entry at 0th + * index and LBI == bearer_req_p->ebi[0] + */ + s11_bearer_deactv_request->delete_default_bearer = delete_default_bearer; + s11_bearer_deactv_request->no_of_bearers = no_of_bearers_to_be_deact; - OAILOG_FUNC_RETURN(LOG_PGW_APP, rc); + memcpy( + s11_bearer_deactv_request->ebi, + ebi_to_be_deactivated, + (sizeof(ebi_t) * 
no_of_bearers_to_be_deact)); + print_bearer_ids_helper( + s11_bearer_deactv_request->ebi, s11_bearer_deactv_request->no_of_bearers); + + message_p->ittiMsgHeader.imsi = imsi64; + OAILOG_INFO( + LOG_SPGW_APP, + "Sending nw_initiated_deactv_bearer_req to mme_app " + "with delete_default_bearer flag set to %d\n", + s11_bearer_deactv_request->delete_default_bearer); + int rc = itti_send_msg_to_task(TASK_MME_APP, INSTANCE_DEFAULT, message_p); + OAILOG_FUNC_RETURN(LOG_SPGW_APP, rc); } //------------------------------------------------------------------------------ - -uint32_t pgw_handle_nw_init_activate_bearer_rsp( - const itti_s5_nw_init_actv_bearer_rsp_t *const act_ded_bearer_rsp) +int spgw_send_nw_init_activate_bearer_rsp( + gtpv2c_cause_value_t cause, + imsi64_t imsi64, + uint8_t eps_bearer_id) { - uint32_t rc = RETURNok; OAILOG_FUNC_IN(LOG_PGW_APP); + uint32_t rc = RETURNok; OAILOG_INFO( - LOG_PGW_APP, - "Sending Create Bearer Rsp to PCRF with EBI %d\n", - act_ded_bearer_rsp->ebi); + LOG_SPGW_APP, + "To be implemented: Sending Create Bearer Rsp to PCRF with EBI %d with " + "cause :%d \n", + eps_bearer_id, + cause); // Send Create Bearer Rsp to PCRF // TODO-Uncomment once implemented at PCRF /* rc = send_dedicated_bearer_actv_rsp(act_ded_bearer_rsp->ebi, act_ded_bearer_rsp->cause);*/ - OAILOG_FUNC_RETURN(LOG_PGW_APP, rc); + OAILOG_FUNC_RETURN(LOG_SPGW_APP, rc); } //------------------------------------------------------------------------------ - -uint32_t pgw_handle_nw_init_deactivate_bearer_rsp( - const itti_s5_nw_init_deactv_bearer_rsp_t *const deact_ded_bearer_rsp) +uint32_t spgw_handle_nw_init_deactivate_bearer_rsp( + gtpv2c_cause_t cause, + ebi_t lbi) { uint32_t rc = RETURNok; - OAILOG_FUNC_IN(LOG_PGW_APP); - ebi_t ebi[BEARERS_PER_UE]; + OAILOG_FUNC_IN(LOG_SPGW_APP); - memcpy(ebi, deact_ded_bearer_rsp->ebi, deact_ded_bearer_rsp->no_of_bearers); - print_bearer_ids_helper(ebi, deact_ded_bearer_rsp->no_of_bearers); + OAILOG_INFO( + LOG_SPGW_APP, + "To be 
implemented: Sending Delete Bearer Rsp to PCRF with LBI %u with " + "cause :%d\n", + lbi, + cause.cause_value); // Send Delete Bearer Rsp to PCRF // TODO-Uncomment once implemented at PCRF - // rc = send_dedicated_bearer_deactv_rsp(deact_ded_bearer_rsp->ebi); - OAILOG_FUNC_RETURN(LOG_PGW_APP, rc); + // rc = send_dedicated_bearer_deactv_rsp(lbi, cause); + OAILOG_FUNC_RETURN(LOG_SPGW_APP, rc); +} + +// Build and send ITTI message, s11_create_bearer_request to MME APP +static int _spgw_build_and_send_s11_create_bearer_request( + s_plus_p_gw_eps_bearer_context_information_t* spgw_ctxt_p, + const itti_gx_nw_init_actv_bearer_request_t* const bearer_req_p, + spgw_state_t* spgw_state, + teid_t s1_u_sgw_fteid) +{ + OAILOG_FUNC_IN(LOG_SPGW_APP); + MessageDef* message_p = NULL; + int rc = RETURNerror; + + message_p = itti_alloc_new_message( + TASK_SPGW_APP, S11_NW_INITIATED_ACTIVATE_BEARER_REQUEST); + if (!message_p) { + OAILOG_ERROR( + LOG_SPGW_APP, + "Failed to allocate message_p for" + "S11_NW_INITIATED_BEARER_ACTV_REQUEST\n"); + OAILOG_FUNC_RETURN(LOG_SPGW_APP, rc); + } + + itti_s11_nw_init_actv_bearer_request_t* s11_actv_bearer_request = + &message_p->ittiMsg.s11_nw_init_actv_bearer_request; + memset( + s11_actv_bearer_request, 0, sizeof(itti_s11_nw_init_actv_bearer_request_t)); + // Context TEID + s11_actv_bearer_request->s11_mme_teid = + spgw_ctxt_p->sgw_eps_bearer_context_information.mme_teid_S11; + // LBI + s11_actv_bearer_request->lbi = bearer_req_p->lbi; + // UL TFT to be sent to UE + memcpy( + &s11_actv_bearer_request->tft, + &bearer_req_p->ul_tft, + sizeof(traffic_flow_template_t)); + // QoS + memcpy( + &s11_actv_bearer_request->eps_bearer_qos, + &bearer_req_p->eps_bearer_qos, + sizeof(bearer_qos_t)); + // S1U SGW F-TEID + s11_actv_bearer_request->s1_u_sgw_fteid.teid = s1_u_sgw_fteid; + s11_actv_bearer_request->s1_u_sgw_fteid.interface_type = S1_U_SGW_GTP_U; + // Set IPv4 address type bit + s11_actv_bearer_request->s1_u_sgw_fteid.ipv4 = true; + + // TODO - IPv6 
address + s11_actv_bearer_request->s1_u_sgw_fteid.ipv4_address.s_addr = + spgw_state->sgw_state.sgw_ip_address_S1u_S12_S4_up.s_addr; + message_p->ittiMsgHeader.imsi = + spgw_ctxt_p->sgw_eps_bearer_context_information.imsi64; + OAILOG_INFO( + LOG_SPGW_APP, + "Sending S11 Create Bearer Request to MME_APP for LBI %d IMSI " IMSI_64_FMT, + bearer_req_p->lbi, + message_p->ittiMsgHeader.imsi); + rc = itti_send_msg_to_task(TASK_MME_APP, INSTANCE_DEFAULT, message_p); + OAILOG_FUNC_RETURN(LOG_SPGW_APP, rc); +} + +// Create temporary dedicated bearer context +static int _create_temporary_dedicated_bearer_context( + s_plus_p_gw_eps_bearer_context_information_t* spgw_ctxt_p, + const itti_gx_nw_init_actv_bearer_request_t* const bearer_req_p, + spgw_state_t* spgw_state, + teid_t s1_u_sgw_fteid) +{ + OAILOG_FUNC_IN(LOG_SPGW_APP); + sgw_eps_bearer_ctxt_t* eps_bearer_ctxt_p = + calloc(1, sizeof(sgw_eps_bearer_ctxt_t)); + + if (!eps_bearer_ctxt_p) { + OAILOG_ERROR( + LOG_SPGW_APP, "Failed to allocate memory for eps_bearer_ctxt_p\n"); + OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); + } + // Copy PAA from default bearer cntxt + sgw_eps_bearer_ctxt_t* default_eps_bearer_entry_p = + sgw_cm_get_eps_bearer_entry( + &spgw_ctxt_p->sgw_eps_bearer_context_information.pdn_connection, + spgw_ctxt_p->sgw_eps_bearer_context_information.pdn_connection + .default_bearer); + + if (!default_eps_bearer_entry_p) { + OAILOG_ERROR(LOG_SPGW_APP, "Failed to get default bearer context\n"); + OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); + } + + eps_bearer_ctxt_p->eps_bearer_id = 0; + eps_bearer_ctxt_p->paa = default_eps_bearer_entry_p->paa; + // SGW FTEID + eps_bearer_ctxt_p->s_gw_teid_S1u_S12_S4_up = s1_u_sgw_fteid; + + eps_bearer_ctxt_p->s_gw_ip_address_S1u_S12_S4_up.pdn_type = IPv4; + eps_bearer_ctxt_p->s_gw_ip_address_S1u_S12_S4_up.address.ipv4_address.s_addr = + spgw_state->sgw_state.sgw_ip_address_S1u_S12_S4_up.s_addr; + // DL TFT + memcpy( + &eps_bearer_ctxt_p->tft, + &bearer_req_p->dl_tft, + 
sizeof(traffic_flow_template_t)); + // QoS + memcpy( + &eps_bearer_ctxt_p->eps_bearer_qos, + &bearer_req_p->eps_bearer_qos, + sizeof(bearer_qos_t)); + + OAILOG_INFO( + LOG_SPGW_APP, + "Number of DL packet filter rules: %d\n", + eps_bearer_ctxt_p->tft.numberofpacketfilters); + + // Create temporary spgw bearer context entry + pgw_ni_cbr_proc_t* pgw_ni_cbr_proc = + pgw_get_procedure_create_bearer(spgw_ctxt_p); + if (!pgw_ni_cbr_proc) { + OAILOG_DEBUG( + LOG_SPGW_APP, "Creating a new temporary eps bearer context entry\n"); + pgw_ni_cbr_proc = pgw_create_procedure_create_bearer(spgw_ctxt_p); + if (!pgw_ni_cbr_proc) { + OAILOG_ERROR( + LOG_SPGW_APP, "Failed to create temporary eps bearer context entry\n"); + OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); + } + } + struct sgw_eps_bearer_entry_wrapper_s* sgw_eps_bearer_entry_p = + calloc(1, sizeof(*sgw_eps_bearer_entry_p)); + if (!sgw_eps_bearer_entry_p) { + OAILOG_ERROR( + LOG_SPGW_APP, "Failed to allocate memory for sgw_eps_bearer_entry_p\n"); + OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); + } + sgw_eps_bearer_entry_p->sgw_eps_bearer_entry = eps_bearer_ctxt_p; + LIST_INSERT_HEAD( + (pgw_ni_cbr_proc->pending_eps_bearers), sgw_eps_bearer_entry_p, entries); + OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNok); +} + +// Deletes temporary dedicated bearer context +static void _delete_temporary_dedicated_bearer_context( + teid_t s1_u_sgw_fteid, + ebi_t lbi, + s_plus_p_gw_eps_bearer_context_information_t* spgw_context_p) +{ + OAILOG_FUNC_IN(LOG_SPGW_APP); + pgw_ni_cbr_proc_t* pgw_ni_cbr_proc = NULL; + struct sgw_eps_bearer_entry_wrapper_s* spgw_eps_bearer_entry_p = NULL; + pgw_ni_cbr_proc = pgw_get_procedure_create_bearer(spgw_context_p); + if (!pgw_ni_cbr_proc) { + OAILOG_ERROR( + LOG_SPGW_APP, + "Failed to get Create bearer procedure from temporary stored contexts " + "for lbi :%u \n", + lbi); + OAILOG_FUNC_OUT(LOG_SPGW_APP); + } + OAILOG_INFO( + LOG_SPGW_APP, "Delete temporary bearer context for lbi :%u \n", lbi); + 
spgw_eps_bearer_entry_p = LIST_FIRST(pgw_ni_cbr_proc->pending_eps_bearers); + while (spgw_eps_bearer_entry_p) { + if ( + s1_u_sgw_fteid == + spgw_eps_bearer_entry_p->sgw_eps_bearer_entry->s_gw_teid_S1u_S12_S4_up) { + // Remove the temporary spgw entry + LIST_REMOVE(spgw_eps_bearer_entry_p, entries); + if (spgw_eps_bearer_entry_p->sgw_eps_bearer_entry) { + free_wrapper((void**) &spgw_eps_bearer_entry_p->sgw_eps_bearer_entry); + } + free_wrapper((void**) &spgw_eps_bearer_entry_p); + break; + } + spgw_eps_bearer_entry_p = LIST_NEXT(spgw_eps_bearer_entry_p, entries); + } + if (LIST_EMPTY(pgw_ni_cbr_proc->pending_eps_bearers)) { + pgw_free_procedure_create_bearer((pgw_ni_cbr_proc_t**) &pgw_ni_cbr_proc); + } + OAILOG_FUNC_OUT(LOG_SPGW_APP); } diff --git a/lte/gateway/c/oai/tasks/sgw/pgw_handlers.h b/lte/gateway/c/oai/tasks/sgw/pgw_handlers.h index ebfd4d38de53..76de05b072b9 100644 --- a/lte/gateway/c/oai/tasks/sgw/pgw_handlers.h +++ b/lte/gateway/c/oai/tasks/sgw/pgw_handlers.h @@ -28,24 +28,29 @@ #ifndef FILE_PGW_HANDLERS_SEEN #define FILE_PGW_HANDLERS_SEEN -#include "s5_messages_types.h" -#include "sgw_messages_types.h" +#include "gx_messages_types.h" #include "spgw_state.h" void handle_s5_create_session_request( spgw_state_t* spgw_state, teid_t context_teid, ebi_t eps_bearer_id); -uint32_t pgw_handle_nw_init_activate_bearer_rsp( - const itti_s5_nw_init_actv_bearer_rsp_t *const act_ded_bearer_rsp); -uint32_t pgw_handle_nw_initiated_bearer_actv_req( - spgw_state_t *spgw_state, - const itti_pgw_nw_init_actv_bearer_request_t *const bearer_req_p, - imsi64_t imsi64); -uint32_t pgw_handle_nw_init_deactivate_bearer_rsp( - const itti_s5_nw_init_deactv_bearer_rsp_t *const deact_ded_bearer_rsp); -uint32_t pgw_handle_nw_initiated_bearer_deactv_req( - spgw_state_t *spgw_state, - const itti_pgw_nw_init_deactv_bearer_request_t *const bearer_req_p, +uint32_t spgw_handle_nw_init_deactivate_bearer_rsp( + gtpv2c_cause_t cause, + ebi_t lbi); +int spgw_handle_nw_initiated_bearer_actv_req( 
+ spgw_state_t* state, + const itti_gx_nw_init_actv_bearer_request_t* const bearer_req_p, + imsi64_t imsi64, + gtpv2c_cause_value_t* failed_cause); + +int32_t spgw_handle_nw_initiated_bearer_deactv_req( + spgw_state_t* spgw_state, + const itti_gx_nw_init_deactv_bearer_request_t* const bearer_req_p, imsi64_t imsi64); + +int spgw_send_nw_init_activate_bearer_rsp( + gtpv2c_cause_value_t cause, + imsi64_t imsi64, + uint8_t eps_bearer_id); #endif /* FILE_PGW_HANDLERS_SEEN */ diff --git a/lte/gateway/c/oai/tasks/sgw/pgw_task.c b/lte/gateway/c/oai/tasks/sgw/pgw_task.c deleted file mode 100644 index 7e39a517b042..000000000000 --- a/lte/gateway/c/oai/tasks/sgw/pgw_task.c +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The OpenAirInterface Software Alliance licenses this file to You under - * the Apache License, Version 2.0 (the "License"); you may not use this file - * except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - *------------------------------------------------------------------------------- - * For more information about the OpenAirInterface (OAI) Software Alliance: - * contact@openairinterface.org - */ - -/*! 
\file sgw_task.c - \brief - \author Lionel Gauthier - \company Eurecom - \email: lionel.gauthier@eurecom.fr -*/ -#define PGW -#define PGW_TASK_C - -#include -#include - -#include "log.h" -#include "intertask_interface.h" -#include "pgw_defs.h" -#include "pgw_handlers.h" -#include "sgw.h" -#include "common_defs.h" -#include "bstrlib.h" -#include "intertask_interface_types.h" -#include "spgw_config.h" -#include "spgw_state.h" -#include "assertions.h" - -extern __pid_t g_pid; - -static void pgw_exit(void); - -//------------------------------------------------------------------------------ -static void *pgw_intertask_interface(void *args_p) -{ - itti_mark_task_ready(TASK_PGW_APP); - - spgw_state_t *spgw_state_p; - - while (1) { - MessageDef *received_message_p = NULL; - - itti_receive_msg(TASK_PGW_APP, &received_message_p); - - imsi64_t imsi64 = itti_get_associated_imsi(received_message_p); - OAILOG_DEBUG( - LOG_PGW_APP, - "Received message with imsi: " IMSI_64_FMT, - imsi64); - - if (ITTI_MSG_ID(received_message_p) != TERMINATE_MESSAGE) { - spgw_state_p = get_spgw_state(false); - AssertFatal( - spgw_state_p != NULL, "Failed to retrieve SPGW state on PGW task"); - } - - switch (ITTI_MSG_ID(received_message_p)) { - case PGW_NW_INITIATED_ACTIVATE_BEARER_REQ: { - pgw_handle_nw_initiated_bearer_actv_req( - spgw_state_p, - &received_message_p->ittiMsg.pgw_nw_init_actv_bearer_request, - imsi64); - } break; - - case PGW_NW_INITIATED_DEACTIVATE_BEARER_REQ: { - pgw_handle_nw_initiated_bearer_deactv_req( - spgw_state_p, - &received_message_p->ittiMsg.pgw_nw_init_deactv_bearer_request, - imsi64); - } break; - - case S5_NW_INITIATED_ACTIVATE_BEARER_RESP: { - pgw_handle_nw_init_activate_bearer_rsp( - &received_message_p->ittiMsg.s5_nw_init_actv_bearer_response); - } break; - - case S5_NW_INITIATED_DEACTIVATE_BEARER_RESP: { - pgw_handle_nw_init_deactivate_bearer_rsp( - &received_message_p->ittiMsg.s5_nw_init_deactv_bearer_response); - } break; - - case TERMINATE_MESSAGE: { - 
pgw_exit(); - OAI_FPRINTF_INFO("TASK_PGW terminated\n"); - itti_exit_task(); - } break; - - default: { - OAILOG_DEBUG( - LOG_PGW_APP, - "Unkwnon message ID %d:%s\n", - ITTI_MSG_ID(received_message_p), - ITTI_MSG_NAME(received_message_p)); - } break; - } - - itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), received_message_p); - received_message_p = NULL; - } - - return NULL; -} - -int pgw_init(spgw_config_t *spgw_config_pP) -{ - if (itti_create_task(TASK_PGW_APP, &pgw_intertask_interface, NULL) < 0) { - perror("pthread_create"); - OAILOG_ALERT(LOG_PGW_APP, "Initializing PGW-APP task interface: ERROR\n"); - return RETURNerror; - } - - FILE *fp = NULL; - bstring filename = bformat("/tmp/pgw_%d.status", g_pid); - fp = fopen(bdata(filename), "w+"); - bdestroy(filename); - fprintf(fp, "STARTED\n"); - fflush(fp); - fclose(fp); - - OAILOG_DEBUG(LOG_PGW_APP, "Initializing PGW-APP task interface: DONE\n"); - return RETURNok; -} - -static void pgw_exit(void) -{ - return; -} diff --git a/lte/gateway/c/oai/tasks/sgw/sgw_handlers.c b/lte/gateway/c/oai/tasks/sgw/sgw_handlers.c index 8add287c7b71..81d776dab646 100644 --- a/lte/gateway/c/oai/tasks/sgw/sgw_handlers.c +++ b/lte/gateway/c/oai/tasks/sgw/sgw_handlers.c @@ -71,6 +71,11 @@ extern spgw_config_t spgw_config; extern struct gtp_tunnel_ops *gtp_tunnel_ops; extern void print_bearer_ids_helper(const ebi_t*, uint32_t); +static void _handle_failed_create_bearer_response( + s_plus_p_gw_eps_bearer_context_information_t* spgw_context, + gtpv2c_cause_value_t cause, + imsi64_t imsi64, + uint8_t eps_bearer_id); #if EMBEDDED_SGW #define TASK_MME TASK_MME_APP @@ -78,8 +83,8 @@ extern void print_bearer_ids_helper(const ebi_t*, uint32_t); #define TASK_MME TASK_S11 #endif -//------------------------------------------------------------------------------ -uint32_t sgw_get_new_s1u_teid(spgw_state_t *state) + //------------------------------------------------------------------------------ + uint32_t sgw_get_new_s1u_teid(spgw_state_t* state) { 
__sync_fetch_and_add(&state->sgw_state.gtpv1u_teid, 1); return state->sgw_state.gtpv1u_teid; @@ -151,7 +156,7 @@ int sgw_handle_s11_create_session_request( OAILOG_INFO( LOG_SPGW_APP, - "Putting imsi" IMSI_64_FMT " with teid5 %u", + "Inserting imsi" IMSI_64_FMT " with teid5 %u", imsi64, new_endpoint_p->local_teid); @@ -1608,7 +1613,8 @@ int sgw_handle_release_access_bearers_request( } //------------------------------------------------------------------------- -void handle_s5_create_session_response(s5_create_session_response_t bearer_resp) +void handle_s5_create_session_response( + s5_create_session_response_t session_resp) { OAILOG_FUNC_IN(LOG_SPGW_APP); spgw_state_t* spgw_state_p = NULL; @@ -1623,10 +1629,10 @@ void handle_s5_create_session_response(s5_create_session_response_t bearer_resp) LOG_SPGW_APP, "Handle s5_create_session_response, for Context SGW S11 teid, " TEID_FMT "EPS bearer id %u\n", - bearer_resp.context_teid, - bearer_resp.eps_bearer_id); + session_resp.context_teid, + session_resp.eps_bearer_id); - sgi_create_endpoint_resp = bearer_resp.sgi_create_endpoint_resp; + sgi_create_endpoint_resp = session_resp.sgi_create_endpoint_resp; OAILOG_DEBUG( LOG_SPGW_APP, @@ -1636,7 +1642,7 @@ void handle_s5_create_session_response(s5_create_session_response_t bearer_resp) hashtable_ts_get( spgw_state_p->sgw_state.s11_bearer_context_information, - bearer_resp.context_teid, + session_resp.context_teid, (void**) &new_bearer_ctxt_info_p); /* Since bearer context is not found, can not get mme_s11_teid, imsi64, @@ -1646,11 +1652,11 @@ void handle_s5_create_session_response(s5_create_session_response_t bearer_resp) OAILOG_ERROR( LOG_SPGW_APP, "Failed to fetch sgw bearer context from sgw s11 teid: " TEID_FMT "\n", - bearer_resp.context_teid); + session_resp.context_teid); OAILOG_FUNC_OUT(LOG_SPGW_APP); } - if (bearer_resp.failure_cause == S5_OK) { + if (session_resp.failure_cause == S5_OK) { switch (sgi_create_endpoint_resp.status) { case SGI_STATUS_OK: // Send Create 
Session Response with ack @@ -1715,7 +1721,7 @@ void handle_s5_create_session_response(s5_create_session_response_t bearer_resp) break; } - } else if (bearer_resp.failure_cause == PCEF_FAILURE) { + } else if (session_resp.failure_cause == PCEF_FAILURE) { cause = SERVICE_DENIED; } // Send Create Session Response with Nack @@ -1753,7 +1759,7 @@ void handle_s5_create_session_response(s5_create_session_response_t bearer_resp) &new_bearer_ctxt_info_p->sgw_eps_bearer_context_information.pdn_connection, sgi_create_endpoint_resp.eps_bearer_id); sgw_cm_remove_bearer_context_information( - spgw_state_p, bearer_resp.context_teid); + spgw_state_p, session_resp.context_teid); OAILOG_INFO( LOG_SPGW_APP, "Deleted default bearer context with SGW C-plane TEID = %u " @@ -2180,239 +2186,6 @@ int sgw_handle_create_bearer_response( OAILOG_FUNC_RETURN(LOG_SPGW_APP, rv); } -/* - * Handle NW initiated Dedicated Bearer Activation from PGW - */ -int sgw_handle_nw_initiated_actv_bearer_req( - spgw_state_t *state, - const itti_s5_nw_init_actv_bearer_request_t *const itti_s5_actv_bearer_req, - imsi64_t imsi64) -{ - MessageDef* message_p = NULL; - pgw_ni_cbr_proc_t* pgw_ni_cbr_proc = NULL; - int rc = RETURNok; - - OAILOG_FUNC_IN(LOG_SPGW_APP); - OAILOG_INFO( - LOG_SPGW_APP, - "Received Dedicated Bearer Req Activation from PGW for LBI %d\n", - itti_s5_actv_bearer_req->lbi); - - s_plus_p_gw_eps_bearer_context_information_t* - s_plus_p_gw_eps_bearer_ctxt_info_p = NULL; - hashtable_rc_t hash_rc = HASH_TABLE_OK; - - hash_rc = hashtable_ts_get( - state->sgw_state.s11_bearer_context_information, - itti_s5_actv_bearer_req->s_gw_teid_S11_S4, - (void**) &s_plus_p_gw_eps_bearer_ctxt_info_p); - - if (HASH_TABLE_OK != hash_rc) { - OAILOG_ERROR( - LOG_SPGW_APP, - "Did not find hash table entry for teid %u" - "for S11_NW_INITIATED_BEARER_ACTV_REQUEST\n", - itti_s5_actv_bearer_req->s_gw_teid_S11_S4); - OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); - } - - //Send ITTI message to MME APP - message_p = 
itti_alloc_new_message( - TASK_SPGW_APP, S11_NW_INITIATED_ACTIVATE_BEARER_REQUEST); - if (message_p == NULL) { - OAILOG_ERROR( - LOG_SPGW_APP, - "Failed to allocate message_p for" - "S11_NW_INITIATED_BEARER_ACTV_REQUEST\n"); - OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); - } - if (message_p) { - itti_s11_nw_init_actv_bearer_request_t* s11_actv_bearer_request = - &message_p->ittiMsg.s11_nw_init_actv_bearer_request; - memset( - s11_actv_bearer_request, - 0, - sizeof(itti_s11_nw_init_actv_bearer_request_t)); - //Context TEID - s11_actv_bearer_request->s11_mme_teid = - itti_s5_actv_bearer_req->mme_teid_S11; - //LBI - s11_actv_bearer_request->lbi = itti_s5_actv_bearer_req->lbi; - //PCO - memcpy( - &s11_actv_bearer_request->pco, - &itti_s5_actv_bearer_req->pco, - sizeof(protocol_configuration_options_t)); - //UL TFT to be sent to UE - memcpy( - &s11_actv_bearer_request->tft, - &itti_s5_actv_bearer_req->ul_tft, - sizeof(traffic_flow_template_t)); - //QoS - memcpy( - &s11_actv_bearer_request->eps_bearer_qos, - &itti_s5_actv_bearer_req->eps_bearer_qos, - sizeof(bearer_qos_t)); - - //S1U SGW F-TEID - s11_actv_bearer_request->s1_u_sgw_fteid.teid = sgw_get_new_s1u_teid(state); - s11_actv_bearer_request->s1_u_sgw_fteid.interface_type = S1_U_SGW_GTP_U; - //Set IPv4 address type bit - s11_actv_bearer_request->s1_u_sgw_fteid.ipv4 = true; - - //TODO - IPv6 address - s11_actv_bearer_request->s1_u_sgw_fteid.ipv4_address.s_addr = - state->sgw_state.sgw_ip_address_S1u_S12_S4_up.s_addr; - - // Create temporary dedicated bearer context - sgw_eps_bearer_ctxt_t* eps_bearer_ctxt_p = - calloc(1, sizeof(sgw_eps_bearer_ctxt_t)); - - if (!eps_bearer_ctxt_p) { - OAILOG_ERROR( - LOG_SPGW_APP, "Failed to allocate memory for eps_bearer_ctxt_p\n"); - OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); - } - // Copy PAA from default bearer cntxt - sgw_eps_bearer_ctxt_t* default_eps_bearer_entry_p = - sgw_cm_get_eps_bearer_entry( - &s_plus_p_gw_eps_bearer_ctxt_info_p->sgw_eps_bearer_context_information - 
.pdn_connection, - s_plus_p_gw_eps_bearer_ctxt_info_p->sgw_eps_bearer_context_information - .pdn_connection.default_bearer); - - if (!default_eps_bearer_entry_p) { - OAILOG_ERROR(LOG_SPGW_APP, "Failed to get default bearer context\n"); - OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); - } - - eps_bearer_ctxt_p->eps_bearer_id = 0; - eps_bearer_ctxt_p->paa = default_eps_bearer_entry_p->paa; - // SGW FTEID - eps_bearer_ctxt_p->s_gw_teid_S1u_S12_S4_up = - s11_actv_bearer_request->s1_u_sgw_fteid.teid; - - eps_bearer_ctxt_p->s_gw_ip_address_S1u_S12_S4_up.pdn_type = IPv4; - eps_bearer_ctxt_p->s_gw_ip_address_S1u_S12_S4_up.address.ipv4_address - .s_addr = state->sgw_state.sgw_ip_address_S1u_S12_S4_up.s_addr; - // DL TFT - memcpy( - &eps_bearer_ctxt_p->tft, - &itti_s5_actv_bearer_req->dl_tft, - sizeof(traffic_flow_template_t)); - // QoS - memcpy( - &eps_bearer_ctxt_p->eps_bearer_qos, - &itti_s5_actv_bearer_req->eps_bearer_qos, - sizeof(bearer_qos_t)); - - OAILOG_INFO( - LOG_SPGW_APP, - "Number of DL packet filter rules: %d\n", - eps_bearer_ctxt_p->tft.numberofpacketfilters); - - // Create temporary spgw bearer context entry - pgw_ni_cbr_proc = - pgw_get_procedure_create_bearer(s_plus_p_gw_eps_bearer_ctxt_info_p); - if (!pgw_ni_cbr_proc) { - OAILOG_DEBUG( - LOG_SPGW_APP, "Creating a new temporary eps bearer context entry\n"); - pgw_ni_cbr_proc = - pgw_create_procedure_create_bearer(s_plus_p_gw_eps_bearer_ctxt_info_p); - if (!pgw_ni_cbr_proc) { - OAILOG_ERROR( - LOG_SPGW_APP, - "Failed to create temporary eps bearer context entry\n"); - OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); - } - } - struct sgw_eps_bearer_entry_wrapper_s* sgw_eps_bearer_entry_p = - calloc(1, sizeof(*sgw_eps_bearer_entry_p)); - if (!sgw_eps_bearer_entry_p) { - OAILOG_ERROR( - LOG_SPGW_APP, "Failed to allocate memory for sgw_eps_bearer_entry_p\n"); - OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); - } - sgw_eps_bearer_entry_p->sgw_eps_bearer_entry = eps_bearer_ctxt_p; - LIST_INSERT_HEAD( - 
(pgw_ni_cbr_proc->pending_eps_bearers), sgw_eps_bearer_entry_p, entries); - - OAILOG_INFO( - LOG_SPGW_APP, - "Sending S11_PCRF_DED_BEARER_ACTV_REQUEST to MME with LBI %d\n", - itti_s5_actv_bearer_req->lbi); - - message_p->ittiMsgHeader.imsi = imsi64; - rc = itti_send_msg_to_task(TASK_MME, INSTANCE_DEFAULT, message_p); - } - OAILOG_FUNC_RETURN(LOG_SPGW_APP, rc); -} - -int send_activate_dedicated_bearer_rsp_to_pgw( - spgw_state_t* state, - gtpv2c_cause_value_t cause, - teid_t sgw_s11_teid, - ebi_t ebi, - teid_t enb_u_teid, - teid_t sgw_u_teid, - imsi64_t imsi64) -{ - uint32_t rc = RETURNok; - MessageDef* message_p = NULL; - hashtable_rc_t hash_rc = HASH_TABLE_OK; - s_plus_p_gw_eps_bearer_context_information_t* spgw_context = NULL; - - hash_rc = hashtable_ts_get( - state->sgw_state.s11_bearer_context_information, - sgw_s11_teid, - (void**) &spgw_context); - - if (hash_rc != HASH_TABLE_OK) { - OAILOG_ERROR(LOG_SPGW_APP, "Error in retrieving s_plus_p_gw context\n"); - OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); - } - - message_p = - itti_alloc_new_message(TASK_PGW_APP, S5_NW_INITIATED_ACTIVATE_BEARER_RESP); - if (message_p == NULL) { - OAILOG_ERROR( - LOG_SPGW_APP, - "itti_alloc_new_message failed for S5_ACTIVATE_DEDICATED_BEARER_RSP\n"); - OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); - } - itti_s5_nw_init_actv_bearer_rsp_t* act_ded_bearer_rsp = - &message_p->ittiMsg.s5_nw_init_actv_bearer_response; - memset(act_ded_bearer_rsp, 0, sizeof(itti_s5_nw_init_actv_bearer_rsp_t)); - // Cause - act_ded_bearer_rsp->cause = cause; - // IMSI - act_ded_bearer_rsp->imsi = - spgw_context->sgw_eps_bearer_context_information.imsi; - // LBI - act_ded_bearer_rsp->lbi = spgw_context->sgw_eps_bearer_context_information - .pdn_connection.default_bearer; - // Fill EBI and TEID values if request is accepted by the UE else send 0 - if (cause == REQUEST_ACCEPTED) { - // EBI - act_ded_bearer_rsp->ebi = ebi; - // S1-U enb TEID - act_ded_bearer_rsp->S1_U_enb_teid = enb_u_teid; - // S1-U sgw 
TEID - act_ded_bearer_rsp->S1_U_sgw_teid = sgw_u_teid; - } - - message_p->ittiMsgHeader.imsi = imsi64; - - OAILOG_INFO( - LOG_SPGW_APP, - "Sending S5_NW_INIT_ACTIVATE_BEARER_RSP to PGW for EBI %u with cause %d \n", - ebi, - cause); - rc = itti_send_msg_to_task(TASK_PGW_APP, INSTANCE_DEFAULT, message_p); - - OAILOG_FUNC_RETURN(LOG_SPGW_APP, rc); -} - /* * Handle NW initiated Dedicated Bearer Activation Rsp from MME */ @@ -2422,279 +2195,231 @@ int sgw_handle_nw_initiated_actv_bearer_rsp( const itti_s11_nw_init_actv_bearer_rsp_t* const s11_actv_bearer_rsp, imsi64_t imsi64) { + OAILOG_FUNC_IN(LOG_SPGW_APP); s_plus_p_gw_eps_bearer_context_information_t* spgw_context = NULL; uint32_t msg_bearer_index = 0; - uint32_t rc = RETURNok; + uint32_t rc = RETURNerror; sgw_eps_bearer_ctxt_t* eps_bearer_ctxt_p = NULL; sgw_eps_bearer_ctxt_t* eps_bearer_ctxt_entry_p = NULL; struct sgw_eps_bearer_entry_wrapper_s* sgw_eps_bearer_entry_p = NULL; gtpv2c_cause_value_t cause = REQUEST_REJECTED; - hashtable_rc_t hash_rc = HASH_TABLE_OK; pgw_ni_cbr_proc_t* pgw_ni_cbr_proc = NULL; + bearer_context_within_create_bearer_response_t bearer_context = {0}; OAILOG_INFO( LOG_SPGW_APP, - "Received nw_initiated_bearer_actv_rsp from MME with EBI %d\n", - s11_actv_bearer_rsp->bearer_contexts.bearer_contexts[msg_bearer_index] - .eps_bearer_id); - hash_rc = hashtable_ts_get( + "Received nw_initiated_bearer_actv_rsp from MME with EBI %u\n", + s11_actv_bearer_rsp->bearer_contexts.bearer_contexts[msg_bearer_index].eps_bearer_id); + + bearer_context = + s11_actv_bearer_rsp->bearer_contexts.bearer_contexts[msg_bearer_index]; hashtable_ts_get( state->sgw_state.s11_bearer_context_information, s11_actv_bearer_rsp->sgw_s11_teid, (void**) &spgw_context); - if ((spgw_context == NULL) || (hash_rc != HASH_TABLE_OK)) { - OAILOG_ERROR(LOG_SPGW_APP, "Error in retrieving s_plus_p_gw context\n"); - OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); + if (!spgw_context) { + OAILOG_ERROR( + LOG_SPGW_APP, + "Error in retrieving s_plus_p_gw context from sgw_s11_teid " TEID_FMT + 
"\n", + s11_actv_bearer_rsp->sgw_s11_teid); + _handle_failed_create_bearer_response( + spgw_context, + s11_actv_bearer_rsp->cause.cause_value, + imsi64, + bearer_context.eps_bearer_id); + OAILOG_FUNC_RETURN(LOG_SPGW_APP, rc); } + //-------------------------------------- // EPS bearer entry //-------------------------------------- // TODO multiple bearers pgw_ni_cbr_proc = pgw_get_procedure_create_bearer(spgw_context); - if (pgw_ni_cbr_proc) { - sgw_eps_bearer_entry_p = LIST_FIRST(pgw_ni_cbr_proc->pending_eps_bearers); - while (sgw_eps_bearer_entry_p != NULL) { - if ((s11_actv_bearer_rsp->bearer_contexts - .bearer_contexts[msg_bearer_index] - .s1u_sgw_fteid.teid == sgw_eps_bearer_entry_p->sgw_eps_bearer_entry - ->s_gw_teid_S1u_S12_S4_up)) { - /* If UE accepted the request create eps bearer context. - * If UE did not accept the request send reject to NW - */ - if (s11_actv_bearer_rsp->cause.cause_value == REQUEST_ACCEPTED) { - eps_bearer_ctxt_p = sgw_eps_bearer_entry_p->sgw_eps_bearer_entry; - if (eps_bearer_ctxt_p) { - eps_bearer_ctxt_p->eps_bearer_id = - s11_actv_bearer_rsp->bearer_contexts - .bearer_contexts[msg_bearer_index] - .eps_bearer_id; + if (!pgw_ni_cbr_proc) { + OAILOG_ERROR( + LOG_SPGW_APP, + "Failed to get create bearer procedure from temporary stored context, so " + "did not create new EPS bearer entry for EBI %u\n", + bearer_context.eps_bearer_id); + _handle_failed_create_bearer_response( + spgw_context, + s11_actv_bearer_rsp->cause.cause_value, + imsi64, + bearer_context.eps_bearer_id); + OAILOG_FUNC_RETURN(LOG_SPGW_APP, rc); + } + // If UE did not accept the request send reject to NW + if (s11_actv_bearer_rsp->cause.cause_value != REQUEST_ACCEPTED) { + OAILOG_ERROR( + LOG_SPGW_APP, + "Did not create new EPS bearer entry as " + "UE rejected the request for EBI %u\n", + bearer_context.eps_bearer_id); + _handle_failed_create_bearer_response( + spgw_context, + s11_actv_bearer_rsp->cause.cause_value, + imsi64, + bearer_context.eps_bearer_id); + 
OAILOG_FUNC_RETURN(LOG_SPGW_APP, rc); + } + + sgw_eps_bearer_entry_p = LIST_FIRST(pgw_ni_cbr_proc->pending_eps_bearers); + while (sgw_eps_bearer_entry_p) { + if ( + bearer_context.s1u_sgw_fteid.teid == + sgw_eps_bearer_entry_p->sgw_eps_bearer_entry->s_gw_teid_S1u_S12_S4_up) { + eps_bearer_ctxt_p = sgw_eps_bearer_entry_p->sgw_eps_bearer_entry; + if (eps_bearer_ctxt_p) { + eps_bearer_ctxt_p->eps_bearer_id = bearer_context.eps_bearer_id; + + // Store enb-s1u teid and ip address + get_fteid_ip_address( + &bearer_context.s1u_enb_fteid, + &eps_bearer_ctxt_p->enb_ip_address_S1u); + eps_bearer_ctxt_p->enb_teid_S1u = bearer_context.s1u_enb_fteid.teid; + + eps_bearer_ctxt_entry_p = sgw_cm_insert_eps_bearer_ctxt_in_collection( + &spgw_context->sgw_eps_bearer_context_information.pdn_connection, + eps_bearer_ctxt_p); + if (eps_bearer_ctxt_entry_p == NULL) { + OAILOG_ERROR(LOG_SPGW_APP, "Failed to create new EPS bearer entry\n"); + increment_counter( + "s11_actv_bearer_rsp", + 1, + 2, + "result", + "failure", + "cause", + "internal_software_error"); + } else { + OAILOG_INFO( + LOG_SPGW_APP, + "Successfully created new EPS bearer entry with EBI %d\n", + eps_bearer_ctxt_p->eps_bearer_id); + + cause = REQUEST_ACCEPTED; + // setup GTPv1-U tunnel for each packet filter + // enb, UE and imsi are common across rules + struct in_addr enb = {.s_addr = 0}; + enb.s_addr = eps_bearer_ctxt_entry_p->enb_ip_address_S1u.address + .ipv4_address.s_addr; + struct in_addr ue = {.s_addr = 0}; + ue.s_addr = eps_bearer_ctxt_entry_p->paa.ipv4_address.s_addr; + Imsi_t imsi = spgw_context->sgw_eps_bearer_context_information.imsi; + // Iterate of packet filter rules + OAILOG_INFO( + LOG_SPGW_APP, + "Number of packet filter rules: %d\n", + eps_bearer_ctxt_entry_p->tft.numberofpacketfilters); + for (int i = 0; + i < eps_bearer_ctxt_entry_p->tft.numberofpacketfilters; + ++i) { + packet_filter_contents_t packet_filter = + eps_bearer_ctxt_entry_p->tft.packetfilterlist.createnewtft[i] + .packetfiltercontents; + 
+ // Prepare DL flow rule + // The TFTs are DL TFTs: UE is the destination/local, + // PDN end point is the source/remote. + struct ipv4flow_dl dlflow; + + // Adding UE to the rule is safe + dlflow.dst_ip.s_addr = ue.s_addr; + + // At least we can match on IP proto and UE IPv4 addr; + // when IPv6 is supported, we need to revisit this. + dlflow.set_params = IP_PROTO | DST_IPV4; + + // Process remote address if present + if ( + (TRAFFIC_FLOW_TEMPLATE_IPV4_REMOTE_ADDR_FLAG & + packet_filter.flags) == + TRAFFIC_FLOW_TEMPLATE_IPV4_REMOTE_ADDR_FLAG) { + struct in_addr remoteaddr = {.s_addr = 0}; + remoteaddr.s_addr = (packet_filter.ipv4remoteaddr[0].addr << 24) + + (packet_filter.ipv4remoteaddr[1].addr << 16) + + (packet_filter.ipv4remoteaddr[2].addr << 8) + + packet_filter.ipv4remoteaddr[3].addr; + dlflow.src_ip.s_addr = ntohl(remoteaddr.s_addr); + dlflow.set_params |= SRC_IPV4; + } + + // Specify next header + dlflow.ip_proto = packet_filter.protocolidentifier_nextheader; + + // Process remote port if present + if ( + (TRAFFIC_FLOW_TEMPLATE_SINGLE_REMOTE_PORT_FLAG & + packet_filter.flags) == + TRAFFIC_FLOW_TEMPLATE_SINGLE_REMOTE_PORT_FLAG) { + if (dlflow.ip_proto == IPPROTO_TCP) { + dlflow.set_params |= TCP_SRC_PORT; + dlflow.tcp_src_port = packet_filter.singleremoteport; + } else if (dlflow.ip_proto == IPPROTO_UDP) { + dlflow.set_params |= UDP_SRC_PORT; + dlflow.udp_src_port = packet_filter.singleremoteport; + } + } - // Store enb-s1u teid and ip address - get_fteid_ip_address( - &s11_actv_bearer_rsp->bearer_contexts - .bearer_contexts[msg_bearer_index] - .s1u_enb_fteid, - &eps_bearer_ctxt_p->enb_ip_address_S1u); - eps_bearer_ctxt_p->enb_teid_S1u = - s11_actv_bearer_rsp->bearer_contexts - .bearer_contexts[msg_bearer_index] - .s1u_enb_fteid.teid; - - eps_bearer_ctxt_entry_p = - sgw_cm_insert_eps_bearer_ctxt_in_collection( - &spgw_context->sgw_eps_bearer_context_information - .pdn_connection, - eps_bearer_ctxt_p); - if (eps_bearer_ctxt_entry_p == NULL) { + // Process 
UE port if present + if ( + (TRAFFIC_FLOW_TEMPLATE_SINGLE_LOCAL_PORT_FLAG & + packet_filter.flags) == + TRAFFIC_FLOW_TEMPLATE_SINGLE_LOCAL_PORT_FLAG) { + if (dlflow.ip_proto == IPPROTO_TCP) { + dlflow.set_params |= TCP_DST_PORT; + dlflow.tcp_dst_port = packet_filter.singleremoteport; + } else if (dlflow.ip_proto == IPPROTO_UDP) { + dlflow.set_params |= UDP_DST_PORT; + dlflow.udp_dst_port = packet_filter.singleremoteport; + } + } + rc = gtp_tunnel_ops->add_tunnel( + ue, + enb, + eps_bearer_ctxt_entry_p->s_gw_teid_S1u_S12_S4_up, + eps_bearer_ctxt_entry_p->enb_teid_S1u, + imsi, + &dlflow, + eps_bearer_ctxt_entry_p->tft.packetfilterlist.createnewtft[i] + .eval_precedence); + + if (rc < 0) { OAILOG_ERROR( - LOG_SPGW_APP, "Failed to create new EPS bearer entry\n"); - increment_counter( - "s11_actv_bearer_rsp", - 1, - 2, - "result", - "failure", - "cause", - "internal_software_error"); + LOG_SPGW_APP, "ERROR in setting up TUNNEL err=%d\n", rc); } else { OAILOG_INFO( LOG_SPGW_APP, - "Successfully created new EPS bearer entry with EBI %d\n", - eps_bearer_ctxt_p->eps_bearer_id); - cause = REQUEST_ACCEPTED; - - OAILOG_INFO( - LOG_SPGW_APP, - "Number of packet filter rules: %d\n", - eps_bearer_ctxt_entry_p->tft.numberofpacketfilters); - // setup GTPv1-U tunnel for each packet filter - // enb, UE and imsi are common across rules - struct in_addr enb = {.s_addr = 0}; - enb.s_addr = eps_bearer_ctxt_entry_p->enb_ip_address_S1u.address - .ipv4_address.s_addr; - struct in_addr ue = {.s_addr = 0}; - ue.s_addr = eps_bearer_ctxt_entry_p->paa.ipv4_address.s_addr; - Imsi_t imsi = - spgw_context->sgw_eps_bearer_context_information.imsi; - // Iterate of packet filter rules - for (int i = 0; - i < eps_bearer_ctxt_entry_p->tft.numberofpacketfilters; - ++i) { - packet_filter_contents_t packet_filter = - eps_bearer_ctxt_entry_p->tft.packetfilterlist.createnewtft[i] - .packetfiltercontents; - - // Prepare DL flow rule - // The TFTs are DL TFTs: UE is the destination/local, - // PDN end point is 
the source/remote. - struct ipv4flow_dl dlflow; - - // Adding UE to the rule is safe - dlflow.dst_ip.s_addr = ue.s_addr; - - // At least we can match on IP proto and UE IPv4 addr; - // when IPv6 is supported, we need to revisit this. - dlflow.set_params = IP_PROTO | DST_IPV4; - - // Process remote address if present - if ( - (TRAFFIC_FLOW_TEMPLATE_IPV4_REMOTE_ADDR_FLAG & - packet_filter.flags) == - TRAFFIC_FLOW_TEMPLATE_IPV4_REMOTE_ADDR_FLAG) { - struct in_addr remoteaddr = {.s_addr = 0}; - remoteaddr.s_addr = - (packet_filter.ipv4remoteaddr[0].addr << 24) + - (packet_filter.ipv4remoteaddr[1].addr << 16) + - (packet_filter.ipv4remoteaddr[2].addr << 8) + - packet_filter.ipv4remoteaddr[3].addr; - dlflow.src_ip.s_addr = ntohl(remoteaddr.s_addr); - dlflow.set_params |= SRC_IPV4; - } - - // Specify next header - dlflow.ip_proto = packet_filter.protocolidentifier_nextheader; - - // Process remote port if present - if ( - (TRAFFIC_FLOW_TEMPLATE_SINGLE_REMOTE_PORT_FLAG & - packet_filter.flags) == - TRAFFIC_FLOW_TEMPLATE_SINGLE_REMOTE_PORT_FLAG) { - if (dlflow.ip_proto == IPPROTO_TCP) { - dlflow.set_params |= TCP_SRC_PORT; - dlflow.tcp_src_port = packet_filter.singleremoteport; - } else if (dlflow.ip_proto == IPPROTO_UDP) { - dlflow.set_params |= UDP_SRC_PORT; - dlflow.udp_src_port = packet_filter.singleremoteport; - } - } - - // Process UE port if present - if ( - (TRAFFIC_FLOW_TEMPLATE_SINGLE_LOCAL_PORT_FLAG & - packet_filter.flags) == - TRAFFIC_FLOW_TEMPLATE_SINGLE_LOCAL_PORT_FLAG) { - if (dlflow.ip_proto == IPPROTO_TCP) { - dlflow.set_params |= TCP_DST_PORT; - dlflow.tcp_dst_port = packet_filter.singleremoteport; - } else if (dlflow.ip_proto == IPPROTO_UDP) { - dlflow.set_params |= UDP_DST_PORT; - dlflow.udp_dst_port = packet_filter.singleremoteport; - } - } - int rv = RETURNok; - rv = gtp_tunnel_ops->add_tunnel( - ue, - enb, - eps_bearer_ctxt_entry_p->s_gw_teid_S1u_S12_S4_up, - eps_bearer_ctxt_entry_p->enb_teid_S1u, - imsi, - &dlflow, - 
eps_bearer_ctxt_entry_p->tft.packetfilterlist.createnewtft[i] - .eval_precedence); - - if (rv < 0) { - OAILOG_ERROR( - LOG_SPGW_APP, "ERROR in setting up TUNNEL err=%d\n", rv); - } else { - OAILOG_INFO( - LOG_SPGW_APP, - "Successfully setup flow rule for EPS bearer id %u tunnel " TEID_FMT - " (eNB) <-> (SGW) " TEID_FMT "\n", - eps_bearer_ctxt_entry_p->eps_bearer_id, - eps_bearer_ctxt_entry_p->enb_teid_S1u, - eps_bearer_ctxt_entry_p->s_gw_teid_S1u_S12_S4_up); - } - } + "Successfully setup flow rule for EPS bearer id %u " + "tunnel " TEID_FMT " (eNB) <-> (SGW) " TEID_FMT "\n", + eps_bearer_ctxt_entry_p->eps_bearer_id, + eps_bearer_ctxt_entry_p->enb_teid_S1u, + eps_bearer_ctxt_entry_p->s_gw_teid_S1u_S12_S4_up); } } - } else { - OAILOG_ERROR( - LOG_SPGW_APP, - "Did not create new EPS bearer entry as " - "UE rejected the request for EBI %d\n", - s11_actv_bearer_rsp->bearer_contexts - .bearer_contexts[msg_bearer_index] - .eps_bearer_id); } - // Remove the temporary spgw entry - LIST_REMOVE(sgw_eps_bearer_entry_p, entries); - free_wrapper((void**) &sgw_eps_bearer_entry_p); - break; } - sgw_eps_bearer_entry_p = LIST_NEXT(sgw_eps_bearer_entry_p, entries); + // Remove the temporary spgw entry + LIST_REMOVE(sgw_eps_bearer_entry_p, entries); + free_wrapper((void**) &sgw_eps_bearer_entry_p); + break; } - } else { - OAILOG_ERROR( - LOG_SPGW_APP, - "Did not create new EPS bearer entry for EBI %u\n", - s11_actv_bearer_rsp->bearer_contexts.bearer_contexts[msg_bearer_index] - .eps_bearer_id); + sgw_eps_bearer_entry_p = LIST_NEXT(sgw_eps_bearer_entry_p, entries); } - // Send ACTIVATE_DEDICATED_BEARER_RSP to PGW - rc = send_activate_dedicated_bearer_rsp_to_pgw( - state, - cause, - s11_actv_bearer_rsp->sgw_s11_teid, - s11_actv_bearer_rsp->bearer_contexts.bearer_contexts[msg_bearer_index] - .eps_bearer_id, - s11_actv_bearer_rsp->bearer_contexts.bearer_contexts[msg_bearer_index] - .s1u_enb_fteid.teid, - s11_actv_bearer_rsp->bearer_contexts.bearer_contexts[msg_bearer_index] - 
.s1u_sgw_fteid.teid, - imsi64); - if (rc != RETURNok) { - OAILOG_ERROR( - LOG_SPGW_APP, "Did not send ACTIVATE_DEDICATED_BEARER_RSP to PGW\n"); + if (pgw_ni_cbr_proc && (LIST_EMPTY(pgw_ni_cbr_proc->pending_eps_bearers))) { + pgw_base_proc_t* base_proc1 = LIST_FIRST( + spgw_context->sgw_eps_bearer_context_information.pending_procedures); + LIST_REMOVE(base_proc1, entries); + pgw_free_procedure_create_bearer((pgw_ni_cbr_proc_t**) &pgw_ni_cbr_proc); } - OAILOG_FUNC_RETURN(LOG_SPGW_APP, rc); -} - -/* - * Handle NW-initiated dedicated bearer deactivation from PGW - */ -uint32_t sgw_handle_nw_initiated_deactv_bearer_req( - const itti_s5_nw_init_deactv_bearer_request_t* const - itti_s5_deactiv_ded_bearer_req, - imsi64_t imsi64) -{ - MessageDef* message_p = NULL; - uint32_t rc = RETURNok; - - OAILOG_FUNC_IN(LOG_SPGW_APP); - OAILOG_INFO( - LOG_SPGW_APP, - "Received nw_initiated_deactv_bearer_req from PGW for TEID %u\n", - itti_s5_deactiv_ded_bearer_req->s11_mme_teid); - - //Build and send ITTI message to MME APP - message_p = itti_alloc_new_message( - TASK_SPGW_APP, S11_NW_INITIATED_DEACTIVATE_BEARER_REQUEST); - if (message_p) { - itti_s11_nw_init_deactv_bearer_request_t* s11_pcrf_bearer_deactv_request = - &message_p->ittiMsg.s11_nw_init_deactv_bearer_request; - memset( - s11_pcrf_bearer_deactv_request, - 0, - sizeof(itti_s11_nw_init_deactv_bearer_request_t)); - memcpy( - s11_pcrf_bearer_deactv_request, - itti_s5_deactiv_ded_bearer_req, - sizeof(itti_s11_nw_init_deactv_bearer_request_t)); - OAILOG_INFO( - LOG_SPGW_APP, - "Sending nw_initiated_deactv_bearer_req to MME with %d EBIs\n", - itti_s5_deactiv_ded_bearer_req->no_of_bearers); - print_bearer_ids_helper( - s11_pcrf_bearer_deactv_request->ebi, - itti_s5_deactiv_ded_bearer_req->no_of_bearers); - - message_p->ittiMsgHeader.imsi = imsi64; - rc = itti_send_msg_to_task(TASK_MME, INSTANCE_DEFAULT, message_p); - } else { + // Send ACTIVATE_DEDICATED_BEARER_RSP to PCRF + rc = spgw_send_nw_init_activate_bearer_rsp( + cause, 
imsi64, bearer_context.eps_bearer_id); + if (rc != RETURNok) { OAILOG_ERROR( - LOG_SPGW_APP, - "itti_alloc_new_message failed for nw_initiated_deactv_bearer_req\n"); - rc = RETURNerror; + LOG_SPGW_APP, "Failed to send ACTIVATE_DEDICATED_BEARER_RSP to PCRF\n"); } OAILOG_FUNC_RETURN(LOG_SPGW_APP, rc); } @@ -2713,7 +2438,7 @@ int sgw_handle_nw_initiated_deactv_bearer_rsp( uint32_t i = 0; s_plus_p_gw_eps_bearer_context_information_t *spgw_ctxt = NULL; uint32_t no_of_bearers = 0; - ebi_t ebi = {0}; + ebi_t ebi = 0; hashtable_rc_t hash_rc = HASH_TABLE_OK; itti_sgi_delete_end_point_request_t sgi_delete_end_point_request; @@ -2818,40 +2543,9 @@ int sgw_handle_nw_initiated_deactv_bearer_rsp( } } } - //Send DEACTIVATE_DEDICATED_BEARER_RSP to PGW - MessageDef *message_p = NULL; - message_p = itti_alloc_new_message( - TASK_PGW_APP, S5_NW_INITIATED_DEACTIVATE_BEARER_RESP); - if (message_p == NULL) { - OAILOG_ERROR( - LOG_MME_APP, - "itti_alloc_new_message failed for nw_initiated_deactv_bearer_rsp\n"); - OAILOG_FUNC_RETURN(LOG_SPGW_APP, RETURNerror); - } - itti_s5_nw_init_deactv_bearer_rsp_t *deact_ded_bearer_rsp = - &message_p->ittiMsg.s5_nw_init_deactv_bearer_response; - deact_ded_bearer_rsp->no_of_bearers = - s11_pcrf_ded_bearer_deactv_rsp->bearer_contexts.num_bearer_context; - - for (i = 0; i < deact_ded_bearer_rsp->no_of_bearers; i++) { - //EBI - deact_ded_bearer_rsp->ebi[i] = ebi; - //Cause - deact_ded_bearer_rsp->cause.cause_value = - s11_pcrf_ded_bearer_deactv_rsp->bearer_contexts.bearer_contexts[i] - .cause.cause_value; - } - OAILOG_INFO( - LOG_MME_APP, - "Sending nw_initiated_deactv_bearer_rsp to PGW with %d EBIs\n", - deact_ded_bearer_rsp->no_of_bearers); - print_bearer_ids_helper( - deact_ded_bearer_rsp->ebi, deact_ded_bearer_rsp->no_of_bearers); - - message_p->ittiMsgHeader.imsi = imsi64; - - rc = itti_send_msg_to_task(TASK_PGW_APP, INSTANCE_DEFAULT, message_p); - + // Send DEACTIVATE_DEDICATED_BEARER_RSP to SPGW Service + spgw_handle_nw_init_deactivate_bearer_rsp( 
+ s11_pcrf_ded_bearer_deactv_rsp->cause, ebi); OAILOG_FUNC_RETURN(LOG_SPGW_APP, rc); } @@ -2879,3 +2573,29 @@ bool is_enb_ip_address_same(const fteid_t *fte_p, ip_address_t *ip_p) } OAILOG_FUNC_RETURN(LOG_SPGW_APP, rc); } + +static void _handle_failed_create_bearer_response( + s_plus_p_gw_eps_bearer_context_information_t* spgw_context, + gtpv2c_cause_value_t cause, + imsi64_t imsi64, + uint8_t eps_bearer_id) +{ + OAILOG_FUNC_IN(LOG_SPGW_APP); + pgw_ni_cbr_proc_t* pgw_ni_cbr_proc = NULL; + if (spgw_context) { + pgw_ni_cbr_proc = pgw_get_procedure_create_bearer(spgw_context); + if ( + (pgw_ni_cbr_proc) && (LIST_EMPTY(pgw_ni_cbr_proc->pending_eps_bearers))) { + pgw_base_proc_t* base_proc1 = LIST_FIRST( + spgw_context->sgw_eps_bearer_context_information.pending_procedures); + LIST_REMOVE(base_proc1, entries); + pgw_free_procedure_create_bearer((pgw_ni_cbr_proc_t**) &pgw_ni_cbr_proc); + } + } + int rc = spgw_send_nw_init_activate_bearer_rsp(cause, imsi64, eps_bearer_id); + if (rc != RETURNok) { + OAILOG_ERROR( + LOG_SPGW_APP, "Failed to send ACTIVATE_DEDICATED_BEARER_RSP to PCRF\n"); + } + OAILOG_FUNC_OUT(LOG_SPGW_APP); +} diff --git a/lte/gateway/c/oai/tasks/sgw/sgw_handlers.h b/lte/gateway/c/oai/tasks/sgw/sgw_handlers.h index 3985b8cdc729..cb7d0d50c16f 100644 --- a/lte/gateway/c/oai/tasks/sgw/sgw_handlers.h +++ b/lte/gateway/c/oai/tasks/sgw/sgw_handlers.h @@ -33,7 +33,6 @@ #include "gtpv1_u_messages_types.h" #include "ip_forward_messages_types.h" #include "s11_messages_types.h" -#include "s5_messages_types.h" #include "spgw_state.h" int sgw_handle_s11_create_session_request( @@ -78,29 +77,15 @@ int sgw_no_pcef_create_dedicated_bearer(spgw_state_t *state, s11_teid_t teid, int sgw_handle_create_bearer_response( spgw_state_t *state, const itti_s11_create_bearer_response_t *const create_bearer_response_pP); -int sgw_handle_nw_initiated_actv_bearer_req( - spgw_state_t *state, - const itti_s5_nw_init_actv_bearer_request_t *const itti_s5_actv_bearer_req, - imsi64_t imsi64); 
int sgw_handle_nw_initiated_actv_bearer_rsp( spgw_state_t *state, const itti_s11_nw_init_actv_bearer_rsp_t *const s11_actv_bearer_rsp, imsi64_t imsi64); -uint32_t sgw_handle_nw_initiated_deactv_bearer_req( - const itti_s5_nw_init_deactv_bearer_request_t - *const itti_s5_deactiv_ded_bearer_req, imsi64_t imsi64); int sgw_handle_nw_initiated_deactv_bearer_rsp( spgw_state_t *state, const itti_s11_nw_init_deactv_bearer_rsp_t *const s11_pcrf_ded_bearer_deactv_rsp, imsi64_t imsi64); bool is_enb_ip_address_same(const fteid_t *fte_p, ip_address_t *ip_p); -int send_activate_dedicated_bearer_rsp_to_pgw( - spgw_state_t* state, - gtpv2c_cause_value_t cause, - teid_t s_gw_teid_S11_S4, - ebi_t ebi, - teid_t enb_u_teid, - teid_t sgw_u_teid, - imsi64_t imsi64); +uint32_t sgw_get_new_s1u_teid(spgw_state_t* state); #endif /* FILE_SGW_HANDLERS_SEEN */ diff --git a/lte/gateway/c/oai/tasks/sgw/sgw_task.c b/lte/gateway/c/oai/tasks/sgw/sgw_task.c index 1941a1f2d481..a205265fbe22 100644 --- a/lte/gateway/c/oai/tasks/sgw/sgw_task.c +++ b/lte/gateway/c/oai/tasks/sgw/sgw_task.c @@ -44,6 +44,7 @@ #include "itti_free_defined_msg.h" #include "sgw_defs.h" #include "sgw_handlers.h" +#include "pgw_handlers.h" #include "sgw_config.h" #include "sgw_context_manager.h" #include "pgw_ue_ip_address_alloc.h" @@ -152,26 +153,6 @@ static void* sgw_intertask_interface(void* args_p) &received_message_p->ittiMsg.sgi_update_end_point_response, imsi64); } break; - case S5_NW_INITIATED_ACTIVATE_BEARER_REQ: { - //Handle Dedicated bearer activation from PCRF - if ( - sgw_handle_nw_initiated_actv_bearer_req( - spgw_state_p, - &received_message_p->ittiMsg.s5_nw_init_actv_bearer_request, - imsi64) != RETURNok) { - // If request handling fails send reject to PGW - send_activate_dedicated_bearer_rsp_to_pgw( - spgw_state_p, - REQUEST_REJECTED /*Cause*/, - received_message_p->ittiMsg.s5_nw_init_actv_bearer_request - .s_gw_teid_S11_S4, /*SGW C-plane teid to fetch spgw context*/ - 0 /*EBI*/, - 0 /*enb teid*/, - 0 /*sgw 
teid*/, - imsi64); - } - } break; - case S11_NW_INITIATED_ACTIVATE_BEARER_RESP: { //Handle Dedicated bearer Activation Rsp from MME sgw_handle_nw_initiated_actv_bearer_rsp( @@ -180,13 +161,6 @@ static void* sgw_intertask_interface(void* args_p) imsi64); } break; - case S5_NW_INITIATED_DEACTIVATE_BEARER_REQ: { - //Handle Dedicated bearer Deactivation Req from PGW - sgw_handle_nw_initiated_deactv_bearer_req( - &received_message_p->ittiMsg.s5_nw_init_deactv_bearer_request, - imsi64); - } break; - case S11_NW_INITIATED_DEACTIVATE_BEARER_RESP: { //Handle Dedicated bearer deactivation Rsp from MME sgw_handle_nw_initiated_deactv_bearer_rsp( @@ -195,6 +169,47 @@ static void* sgw_intertask_interface(void* args_p) imsi64); } break; + case GX_NW_INITIATED_ACTIVATE_BEARER_REQ: { + /* TODO need to discuss as part sending response to PCEF, + * should these errors need to be mapped to gx errors + * or sessiond does mapping of these error codes to gx error codes + */ + gtpv2c_cause_value_t failed_cause = REQUEST_ACCEPTED; + int32_t rc = spgw_handle_nw_initiated_bearer_actv_req( + spgw_state_p, + &received_message_p->ittiMsg.gx_nw_init_actv_bearer_request, + imsi64, + &failed_cause); + if (rc != RETURNok) { + OAILOG_ERROR( + LOG_SPGW_APP, + "Send Create Bearer Failure Response to PCRF with cause :%d \n", + failed_cause); + // Send Reject to PCRF + // TODO-Uncomment once implemented at PCRF + /* rc = send_dedicated_bearer_actv_rsp(bearer_req_p->lbi, + * failed_cause); + */ + } + } break; + + case GX_NW_INITIATED_DEACTIVATE_BEARER_REQ: { + int32_t rc = spgw_handle_nw_initiated_bearer_deactv_req( + spgw_state_p, + &received_message_p->ittiMsg.gx_nw_init_deactv_bearer_request, + imsi64); + if (rc != RETURNok) { + OAILOG_ERROR( + LOG_SPGW_APP, + "Failed to handle NW_INITIATED_DEACTIVATE_BEARER_REQ for imsi:%ld, " + "send bearer deactivation reject to SPGW service \n", + imsi64); + // TODO-Uncomment once implemented at PCRF + /* rc = 
send_dedicated_bearer_deactv_rsp(invalid_bearer_id,REQUEST_REJECTED); + */ + } + } break; + case TERMINATE_MESSAGE: { put_spgw_state(); sgw_exit(); diff --git a/lte/gateway/c/session_manager/sessiond_main.cpp b/lte/gateway/c/session_manager/sessiond_main.cpp index 1fa32dbf1eee..70b3a3685853 100644 --- a/lte/gateway/c/session_manager/sessiond_main.cpp +++ b/lte/gateway/c/session_manager/sessiond_main.cpp @@ -55,11 +55,26 @@ static magma::mconfig::SessionD load_mconfig() return mconfig; } +static const std::shared_ptr get_local_controller( + const YAML::Node &config) +{ + auto port = config["local_session_proxy_port"].as(); + auto addr = "127.0.0.1:" + port; + MLOG(MINFO) << "Using local address " << addr << " for controller"; + return grpc::CreateCustomChannel( + addr, grpc::InsecureChannelCredentials(), grpc::ChannelArguments {}); +} + static const std::shared_ptr get_controller_channel( const YAML::Node &config, const bool relay_enabled) { if (relay_enabled) { MLOG(MINFO) << "Using proxied sessiond controller"; + if (config["use_local_session_proxy"].IsDefined() && + config["use_local_session_proxy"].as()) { + // Use a locally running SessionProxy. 
(Used for testing) + return get_local_controller(config); + } return magma::ServiceRegistrySingleton::Instance()->GetGrpcChannel( SESSION_PROXY_SERVICE, magma::ServiceRegistrySingleton::CLOUD); } else { diff --git a/lte/gateway/configs/magmad.yml b/lte/gateway/configs/magmad.yml index 5e326c2e38c6..80b88cec4553 100644 --- a/lte/gateway/configs/magmad.yml +++ b/lte/gateway/configs/magmad.yml @@ -66,12 +66,9 @@ bootstrap_config: enable_config_streamer: True enable_upgrade_manager: True enable_network_monitor: True -enable_systemd_tailer: True enable_sync_rpc: True enable_kernel_version_checking: True -systemd_tailer_poll_interval: 30 # seconds - network_monitor_config: # How long to sleep between statistic collections sampling_period: 60 diff --git a/lte/gateway/configs/redirectd.yml b/lte/gateway/configs/redirectd.yml index a7a00e6a59b9..73800ae04b4a 100644 --- a/lte/gateway/configs/redirectd.yml +++ b/lte/gateway/configs/redirectd.yml @@ -9,6 +9,4 @@ log_level: INFO -scribe_logging_enabled: False - http_port: 8080 diff --git a/lte/gateway/python/integ_tests/common/subscriber_db_client.py b/lte/gateway/python/integ_tests/common/subscriber_db_client.py index 3db86b511272..19454df3c927 100644 --- a/lte/gateway/python/integ_tests/common/subscriber_db_client.py +++ b/lte/gateway/python/integ_tests/common/subscriber_db_client.py @@ -13,13 +13,16 @@ import abc import base64 import grpc -#import swagger_client from orc8r.protos.common_pb2 import Void -from lte.protos.subscriberdb_pb2 import LTESubscription, SubscriberData, \ - SubscriberState, SubscriberID +from lte.protos.subscriberdb_pb2 import ( + LTESubscription, + SubscriberData, + SubscriberState, + SubscriberID, + SubscriberUpdate, +) from lte.protos.subscriberdb_pb2_grpc import SubscriberDBStub -#from integ_tests.cloud.fixtures import GATEWAY_ID, NETWORK_ID from integ_tests.gateway.rpc import get_gateway_hw_id, get_rpc_channel from magma.subscriberdb.sid import SIDUtils @@ -132,6 +135,31 @@ def 
_get_subscriberdb_data(sid): state.lte_auth_next_seq = 1 return SubscriberData(sid=sub_db_sid, lte=lte, state=state) + @staticmethod + def _get_apn_data(sid, apn_list): + """ + Get APN data in protobuf format. + + Args: + apn_list : list of APN configuration + Returns: + update (protos.subscriberdb_pb2.SubscriberUpdate) + """ + # APN + update = SubscriberUpdate() + update.data.sid.CopyFrom(sid) + non_3gpp = update.data.non_3gpp + for apn in apn_list: + apn_config = non_3gpp.apn_config.add() + apn_config.service_selection = apn["apn_name"] + apn_config.qos_profile.class_id = apn["qci"] + apn_config.qos_profile.priority_level = apn["priority"] + apn_config.qos_profile.preemption_capability = apn["pre_cap"] + apn_config.qos_profile.preemption_vulnerability = apn["pre_vul"] + apn_config.ambr.max_bandwidth_ul = apn["mbr_ul"] + apn_config.ambr.max_bandwidth_dl = apn["mbr_dl"] + return update + def _check_invariants(self): """ Assert preservation of invariants. @@ -147,7 +175,8 @@ def add_subscriber(self, sid): self._added_sids.add(sid) sub_data = self._get_subscriberdb_data(sid) SubscriberDbGrpc._try_to_call( - lambda: self._subscriber_stub.AddSubscriber(sub_data)) + lambda: self._subscriber_stub.AddSubscriber(sub_data) + ) self._check_invariants() def delete_subscriber(self, sid): @@ -163,6 +192,13 @@ def list_subscriber_sids(self): sids = ['IMSI' + sid.id for sid in sids_pb] return sids + def config_apn_details(self, imsi, apn_list): + sid = SIDUtils.to_pb(imsi) + update_sub = self._get_apn_data(sid, apn_list) + SubscriberDbGrpc._try_to_call( + lambda: self._subscriber_stub.UpdateSubscriber(update_sub) + ) + def clean_up(self): # Remove all sids for sid in self.list_subscriber_sids(): diff --git a/lte/gateway/python/integ_tests/defs.mk b/lte/gateway/python/integ_tests/defs.mk index a31890d1b027..1422d1dd9643 100644 --- a/lte/gateway/python/integ_tests/defs.mk +++ b/lte/gateway/python/integ_tests/defs.mk @@ -78,7 +78,7 @@ 
s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer_multi_ue.py \ s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer.py \ s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer_deactivate.py \ s1aptests/test_attach_detach_disconnect_default_pdn.py \ -s1aptests/test_attach_detach_max_pdns.py \ +s1aptests/test_attach_detach_maxbearers_twopdns.py \ s1aptests/test_attach_detach_multiple_secondary_pdn.py \ s1aptests/test_attach_ul_udp_data.py \ s1aptests/test_attach_ul_tcp_data.py diff --git a/lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py b/lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py index 75b70118da04..cfeab23b9a75 100644 --- a/lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py +++ b/lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py @@ -298,6 +298,10 @@ def add_sub(self, num_ues=1): self._subscriber_client.wait_for_changes() return subscribers + def config_apn_data(self, imsi, apn_list): + """ Add APN details """ + self._subscriber_client.config_apn_details(imsi, apn_list) + def cleanup(self): """ Cleanup added subscriber from subscriberdb """ self._subscriber_client.clean_up() diff --git a/lte/gateway/python/integ_tests/s1aptests/s1ap_wrapper.py b/lte/gateway/python/integ_tests/s1aptests/s1ap_wrapper.py index f2a3214ef451..4fbfc02a52d6 100644 --- a/lte/gateway/python/integ_tests/s1aptests/s1ap_wrapper.py +++ b/lte/gateway/python/integ_tests/s1aptests/s1ap_wrapper.py @@ -174,6 +174,10 @@ def configUEDevice(self, num_ues): self._configuredUes.append(reqs[i]) self.check_gw_health_after_ue_load() + def configAPN(self, imsi, apn_list): + """ Configure the APN """ + self._sub_util.config_apn_data(imsi, apn_list) + def configUEDevice_ues_same_imsi(self, num_ues): """ Configure the device on the UE side with same IMSI and having different ue-id""" diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_disconnect_default_pdn.py 
b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_disconnect_default_pdn.py index 4dbcf8c64241..b59642053974 100755 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_disconnect_default_pdn.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_disconnect_default_pdn.py @@ -24,8 +24,9 @@ def tearDown(self): def test_disconnect_default_pdn(self): """ Attach a single UE and send PDN disconnect request for the default bearer """ + num_ue = 1 - self._s1ap_wrapper.configUEDevice(1) + self._s1ap_wrapper.configUEDevice(num_ue) req = self._s1ap_wrapper.ue_req ue_id = req.ue_id print( diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_max_pdns.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_maxbearers_twopdns.py similarity index 87% rename from lte/gateway/python/integ_tests/s1aptests/test_attach_detach_max_pdns.py rename to lte/gateway/python/integ_tests/s1aptests/test_attach_detach_maxbearers_twopdns.py index f65a2154a214..03124985c684 100755 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_max_pdns.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_maxbearers_twopdns.py @@ -15,7 +15,7 @@ from integ_tests.s1aptests.s1ap_utils import SpgwUtil -class TestMaximumBearersPerUe(unittest.TestCase): +class TestMaximumBearersTwoPdnsPerUe(unittest.TestCase): def setUp(self): self._s1ap_wrapper = s1ap_wrapper.TestWrapper() self._spgw_util = SpgwUtil() @@ -23,16 +23,37 @@ def setUp(self): def tearDown(self): self._s1ap_wrapper.cleanup() - def test_max_bearers_per_ue(self): + def testMaxBearersTwoPdnsPerUe(self): """ Attach a single UE and send standalone PDN Connectivity Request + add 9 dedicated bearers + detach""" num_ues = 1 + self._s1ap_wrapper.configUEDevice(num_ues) + # 1 oai PDN + 1 dedicated bearer, 1 ims pdn + 8 dedicated bearers loop = 8 + # APN of the secondary PDN + ims = { + "apn_name": "ims", # APN-name + "qci": 5, # qci + "priority": 15, # priority + 
"pre_cap": 0, # preemption-capability + "pre_vul": 0, # preemption-vulnerability + "mbr_ul": 200000000, # MBR UL + "mbr_dl": 100000000, # MBR DL + } + + # APN list to be configured + apn_list = [ims] + for i in range(num_ues): req = self._s1ap_wrapper.ue_req + + self._s1ap_wrapper.configAPN( + "IMSI" + "".join([str(i) for i in req.imsi]), apn_list + ) + ue_id = req.ue_id print( "********************* Running End to End attach for UE id ", diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_multiple_secondary_pdn.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_multiple_secondary_pdn.py index ef765fdb68a2..597f5cceff6b 100755 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_multiple_secondary_pdn.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_multiple_secondary_pdn.py @@ -21,15 +21,43 @@ def setUp(self): def tearDown(self): self._s1ap_wrapper.cleanup() - def test_multiple_seconday_pdn_conn_req(self): + def test_multiple_secondary_pdn_conn_req(self): """ Attach a single UE + add 2 PDN Connections + disconnect """ num_pdns = 2 bearer_ids = [] - apn = ["ims", "internet"] - self._s1ap_wrapper.configUEDevice(1) + num_ue = 1 + self._s1ap_wrapper.configUEDevice(num_ue) req = self._s1ap_wrapper.ue_req ue_id = req.ue_id + + # internet APN + internet = { + "apn_name": "internet", # APN-name + "qci": 9, # qci + "priority": 15, # priority + "pre_cap": 0, # preemption-capability + "pre_vul": 0, # preemption-vulnerability + "mbr_ul": 250000000, # MBR UL + "mbr_dl": 150000000, # MBR DL + } + # ims APN + ims = { + "apn_name": "ims", # APN-name + "qci": 5, # qci + "priority": 15, # priority + "pre_cap": 0, # preemption-capability + "pre_vul": 0, # preemption-vulnerability + "mbr_ul": 200000000, # MBR UL + "mbr_dl": 100000000, # MBR DL + } + + # APN list to be configured + apn_list = [ims, internet] + + self._s1ap_wrapper.configAPN( + "IMSI" + "".join([str(i) for i in req.imsi]), apn_list + ) print( 
"*********************** Running End to End attach for UE id ", ue_id, @@ -46,6 +74,8 @@ def test_multiple_seconday_pdn_conn_req(self): self._s1ap_wrapper._s1_util.receive_emm_info() time.sleep(2) + # APNs of the secondary PDNs + apn = ["ims", "internet"] for i in range(num_pdns): # Send PDN Connectivity Request self._s1ap_wrapper.sendPdnConnectivityReq(ue_id, apn[i]) diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn.py index 582a71ced96a..659a6437e35f 100755 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn.py @@ -21,13 +21,32 @@ def setUp(self): def tearDown(self): self._s1ap_wrapper.cleanup() - def test_seconday_pdn_conn_req(self): + def test_secondary_pdn_conn_req(self): """ Attach a single UE and send standalone PDN Connectivity Request """ + num_ue = 1 - self._s1ap_wrapper.configUEDevice(1) + self._s1ap_wrapper.configUEDevice(num_ue) req = self._s1ap_wrapper.ue_req ue_id = req.ue_id + + # APN of the secondary PDN + ims = { + "apn_name": "ims", # APN-name + "qci": 5, # qci + "priority": 15, # priority + "pre_cap": 0, # preemption-capability + "pre_vul": 0, # preemption-vulnerability + "mbr_ul": 200000000, # MBR UL + "mbr_dl": 100000000, # MBR DL + } + + # APN list to be configured + apn_list = [ims] + + self._s1ap_wrapper.configAPN( + "IMSI" + "".join([str(i) for i in req.imsi]), apn_list + ) print( "************************* Running End to End attach for UE id ", ue_id, diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_disconnect_dedicated_bearer.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_disconnect_dedicated_bearer.py index b5c3d94f5184..e480783ed45e 100644 --- 
a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_disconnect_dedicated_bearer.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_disconnect_dedicated_bearer.py @@ -23,16 +23,35 @@ def setUp(self): def tearDown(self): self._s1ap_wrapper.cleanup() - def test_seconday_pdn_disconn_ded_bearer(self): + def test_secondary_pdn_disconn_ded_bearer(self): """ Attach a single UE and send standalone PDN Connectivity Request + add dedicated bearer to each default bearer + disconnect dedicated bearer""" num_ues = 1 + self._s1ap_wrapper.configUEDevice(num_ues) + req = self._s1ap_wrapper.ue_req for i in range(num_ues): - req = self._s1ap_wrapper.ue_req ue_id = req.ue_id + # APN of the secondary PDN + ims = { + "apn_name": "ims", # APN-name + "qci": 5, # qci + "priority": 15, # priority + "pre_cap": 0, # preemption-capability + "pre_vul": 0, # preemption-vulnerability + "mbr_ul": 200000000, # MBR UL + "mbr_dl": 100000000, # MBR DL + } + + # APN list to be configured + apn_list = [ims] + + self._s1ap_wrapper.configAPN( + "IMSI" + "".join([str(i) for i in req.imsi]), apn_list + ) + print( "********************* Running End to End attach for UE id ", ue_id, diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_disconnect_invalid_bearer.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_disconnect_invalid_bearer.py index dbc71428898f..94123f5cca26 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_disconnect_invalid_bearer.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_disconnect_invalid_bearer.py @@ -21,13 +21,34 @@ def setUp(self): def tearDown(self): self._s1ap_wrapper.cleanup() - def test_seconday_pdn_disconn_invalid_bearer_id(self): + def test_secondary_pdn_disconn_invalid_bearer_id(self): """ Attach a single UE + send standalone PDN Connectivity Request + send PDN disconnect with invalid 
bearer id """ - self._s1ap_wrapper.configUEDevice(1) + num_ue = 1 + + self._s1ap_wrapper.configUEDevice(num_ue) req = self._s1ap_wrapper.ue_req ue_id = req.ue_id + + # APN of the secondary PDN + ims = { + "apn_name": "ims", # APN-name + "qci": 5, # qci + "priority": 15, # priority + "pre_cap": 0, # preemption-capability + "pre_vul": 0, # preemption-vulnerability + "mbr_ul": 200000000, # MBR UL + "mbr_dl": 100000000, # MBR DL + } + + # APN list to be configured + apn_list = [ims] + + self._s1ap_wrapper.configAPN( + "IMSI" + "".join([str(i) for i in req.imsi]), apn_list + ) + # Declare an array of len 15 as the bearer id ranges from 5-15 length = 15 bearer_idx = [0] * length diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_looped.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_looped.py index 369cd962e1e7..e07999378793 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_looped.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_looped.py @@ -21,20 +21,41 @@ def setUp(self): def tearDown(self): self._s1ap_wrapper.cleanup() - def test_seconday_pdn_conn_looped(self): + def test_secondary_pdn_conn_looped(self): """ Attach a single UE and send standalone PDN Connectivity Request + detach. 
Repeat 3 times """ - self._s1ap_wrapper.configUEDevice(1) + num_ues = 1 + + self._s1ap_wrapper.configUEDevice(num_ues) req = self._s1ap_wrapper.ue_req ue_id = req.ue_id loop = 3 + + # APN of the secondary PDN + ims = { + "apn_name": "ims", # APN-name + "qci": 5, # qci + "priority": 15, # priority + "pre_cap": 0, # preemption-capability + "pre_vul": 0, # preemption-vulnerability + "mbr_ul": 200000000, # MBR UL + "mbr_dl": 100000000, # MBR DL + } + + # APN list to be configured + apn_list = [ims] + + self._s1ap_wrapper.configAPN( + "IMSI" + "".join([str(i) for i in req.imsi]), apn_list + ) + print( "************************* Running End to End attach for UE id ", ue_id, ) # Attach - for i in range(loop): + for _ in range(loop): time.sleep(5) self._s1ap_wrapper.s1_util.attach( ue_id, diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_multi_ue.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_multi_ue.py index bcbdd8e93c10..a41f4266f905 100755 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_multi_ue.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_multi_ue.py @@ -21,15 +21,35 @@ def setUp(self): def tearDown(self): self._s1ap_wrapper.cleanup() - def test_seconday_pdn_conn_req_multi_ue(self): + def test_secondary_pdn_conn_req_multi_ue(self): """ attach/detach + PDN Connectivity Requests with 4 UEs """ num_ues = 4 ue_ids = [] bearer_ids = [] + self._s1ap_wrapper.configUEDevice(num_ues) + + # APN of the secondary PDN + ims = { + "apn_name": "ims", # APN-name + "qci": 5, # qci + "priority": 15, # priority + "pre_cap": 0, # preemption-capability + "pre_vul": 0, # preemption-vulnerability + "mbr_ul": 200000000, # MBR UL + "mbr_dl": 100000000, # MBR DL + } + + # APN list to be configured + apn_list = [ims] + for _ in range(num_ues): req = self._s1ap_wrapper.ue_req ue_id = req.ue_id + self._s1ap_wrapper.configAPN( + "IMSI" + "".join([str(i) for i 
in req.imsi]), apn_list + ) + print( "******************* Running End to End attach for UE id ", ue_id, diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_no_disconnect.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_no_disconnect.py index f63ac22be61a..80cbab4ea5fe 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_no_disconnect.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_no_disconnect.py @@ -21,13 +21,33 @@ def setUp(self): def tearDown(self): self._s1ap_wrapper.cleanup() - def test_seconday_pdn_conn_req(self): + def test_secondary_pdn_conn_req(self): """ Attach a single UE , send standalone PDN Connectivity Request and detach without sending PDN Disconnect """ - self._s1ap_wrapper.configUEDevice(1) + num_ue = 1 + + self._s1ap_wrapper.configUEDevice(num_ue) req = self._s1ap_wrapper.ue_req ue_id = req.ue_id + + # APN of the secondary PDN + ims = { + "apn_name": "ims", # APN-name + "qci": 5, # qci + "priority": 15, # priority + "pre_cap": 0, # preemption-capability + "pre_vul": 0, # preemption-vulnerability + "mbr_ul": 200000000, # MBR UL + "mbr_dl": 100000000, # MBR DL + } + + # APN list to be configured + apn_list = [ims] + self._s1ap_wrapper.configAPN( + "IMSI" + "".join([str(i) for i in req.imsi]), apn_list + ) + print( "************************* Running End to End attach for UE id ", ue_id, diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer.py index 3673ad787825..03aea9481087 100755 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer.py @@ -23,15 +23,33 @@ def setUp(self): def tearDown(self): 
self._s1ap_wrapper.cleanup() - def test_seconday_pdn_conn_ded_bearer(self): + def test_secondary_pdn_conn_ded_bearer(self): """ Attach a single UE and send standalone PDN Connectivity Request + add dedicated bearer to each default bearer """ num_ues = 1 + self._s1ap_wrapper.configUEDevice(num_ues) for i in range(num_ues): req = self._s1ap_wrapper.ue_req ue_id = req.ue_id + # APN of the secondary PDN + ims = { + "apn_name": "ims", # APN-name + "qci": 5, # qci + "priority": 15, # priority + "pre_cap": 0, # preemption-capability + "pre_vul": 0, # preemption-vulnerability + "mbr_ul": 200000000, # MBR UL + "mbr_dl": 100000000, # MBR DL + } + + # APN list to be configured + apn_list = [ims] + + self._s1ap_wrapper.configAPN( + "IMSI" + "".join([str(i) for i in req.imsi]), apn_list + ) print( "********************* Running End to End attach for UE id ", ue_id, diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer_deactivate.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer_deactivate.py index a7381fb869dc..572532bfaabe 100755 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer_deactivate.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer_deactivate.py @@ -23,16 +23,35 @@ def setUp(self): def tearDown(self): self._s1ap_wrapper.cleanup() - def test_seconday_pdn_conn_ded_bearer_deactivate(self): + def test_secondary_pdn_conn_ded_bearer_deactivate(self): """ Attach a single UE and send standalone PDN Connectivity Request + add dedicated bearer to each default bearer + deactivate dedicated bearers + detach""" num_ues = 1 + self._s1ap_wrapper.configUEDevice(num_ues) for i in range(num_ues): req = self._s1ap_wrapper.ue_req ue_id = req.ue_id + # APN of the secondary PDN + ims = { + "apn_name": "ims", # APN-name + "qci": 5, # qci + "priority": 15, # priority + "pre_cap": 0, # 
preemption-capability + "pre_vul": 0, # preemption-vulnerability + "mbr_ul": 200000000, # MBR UL + "mbr_dl": 100000000, # MBR DL + } + + # APN list to be configured + apn_list = [ims] + + self._s1ap_wrapper.configAPN( + "IMSI" + "".join([str(i) for i in req.imsi]), apn_list + ) + print( "********************* Running End to End attach for UE id ", ue_id, diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer_looped.py b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer_looped.py index ab065cfff8a1..e8ef398b99b2 100755 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer_looped.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer_looped.py @@ -23,17 +23,36 @@ def setUp(self): def tearDown(self): self._s1ap_wrapper.cleanup() - def test_seconday_pdn_conn_ded_bearer_looped(self): + def test_secondary_pdn_conn_ded_bearer_looped(self): """ Attach a single UE and send standalone PDN Connectivity Request + add dedicated bearer to each default bearer repeat 3 times """ num_ues = 1 loop = 3 + self._s1ap_wrapper.configUEDevice(num_ues) for i in range(num_ues): req = self._s1ap_wrapper.ue_req ue_id = req.ue_id + # APN of the secondary PDN + ims = { + "apn_name": "ims", # APN-name + "qci": 5, # qci + "priority": 15, # priority + "pre_cap": 0, # preemption-capability + "pre_vul": 0, # preemption-vulnerability + "mbr_ul": 200000000, # MBR UL + "mbr_dl": 100000000, # MBR DL + } + + # APN list to be configured + apn_list = [ims] + + self._s1ap_wrapper.configAPN( + "IMSI" + "".join([str(i) for i in req.imsi]), apn_list + ) + print( "********************* Running End to End attach for UE id ", ue_id, diff --git a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer_multi_ue.py 
b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer_multi_ue.py index f75e0b0b72eb..f64695cd9e3d 100644 --- a/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer_multi_ue.py +++ b/lte/gateway/python/integ_tests/s1aptests/test_attach_detach_secondary_pdn_with_dedicated_bearer_multi_ue.py @@ -23,16 +23,35 @@ def setUp(self): def tearDown(self): self._s1ap_wrapper.cleanup() - def test_seconday_pdn_with_dedicated_bearer_multi_ue(self): + def test_secondary_pdn_with_dedicated_bearer_multi_ue(self): """ attach/detach + PDN Connectivity Requests + dedicated bearer for 4 UEs """ num_ues = 4 ue_ids = [] bearer_ids = [] + self._s1ap_wrapper.configUEDevice(num_ues) for _ in range(num_ues): req = self._s1ap_wrapper.ue_req ue_id = req.ue_id + # APN of the secondary PDN + ims = { + "apn_name": "ims", # APN-name + "qci": 5, # qci + "priority": 15, # priority + "pre_cap": 0, # preemption-capability + "pre_vul": 0, # preemption-vulnerability + "mbr_ul": 200000000, # MBR UL + "mbr_dl": 100000000, # MBR DL + } + + # APN list to be configured + apn_list = [ims] + + self._s1ap_wrapper.configAPN( + "IMSI" + "".join([str(i) for i in req.imsi]), apn_list + ) + print( "******************* Running End to End attach for UE id ", ue_id, diff --git a/lte/gateway/python/magma/pipelined/qos/qos_rate_limiting.py b/lte/gateway/python/magma/pipelined/qos/qos_rate_limiting.py index db8440316efc..45b3470b41e3 100644 --- a/lte/gateway/python/magma/pipelined/qos/qos_rate_limiting.py +++ b/lte/gateway/python/magma/pipelined/qos/qos_rate_limiting.py @@ -9,12 +9,23 @@ from collections import defaultdict from collections import deque -from typing import Dict # noqa +from typing import Dict, List # noqa +import os import shlex import subprocess import logging +# this code can run in either a docker container(CWAG) or as a native +# python process(AG). When we are running as a root there is no need for +# using sudo. 
(related to task T63499189 where tc commands failed since +# sudo wasn't available in the docker container) +def argSplit(cmd: str) -> List[str]: + args = [] if os.geteuid() == 0 else ["sudo"] + args.extend(shlex.split(cmd)) + return args + + class TrafficClass: """ Creates/Deletes queues in linux. Using Qdiscs for flow based @@ -22,61 +33,60 @@ class TrafficClass: """ @staticmethod - def delete_class(intf: str, qid: int) -> None: - tc_cmd = "sudo tc class del dev {} classid 1:{}".format(intf, qid) - filter_cmd = "sudo tc filter del dev {} protocol ip parent 1: prio 1 \ + def delete_class(intf: str, qid: int) -> None: + tc_cmd = "tc class del dev {} classid 1:{}".format(intf, qid) + filter_cmd = "tc filter del dev {} protocol ip parent 1: prio 1 \ handle {qid} fw flowid 1:{qid}".format(intf, qid=qid) - args = shlex.split(filter_cmd) + args = argSplit(filter_cmd) ret = subprocess.call(args) logging.debug("add filter ret %d", ret) - args = shlex.split(tc_cmd) + args = argSplit(tc_cmd) ret = subprocess.call(args) logging.debug("qdisc del q qid %s ret %d", qid, ret) @staticmethod def create_class(intf: str, qid: int, max_bw: int) -> None: - tc_cmd = "sudo tc class add dev {} parent 1:fffe classid 1:{} htb \ + tc_cmd = "tc class add dev {} parent 1:fffe classid 1:{} htb \ rate 12000 ceil {}".format(intf, qid, max_bw) - qdisc_cmd = "sudo tc qdisc add dev {} parent 1:{} \ + qdisc_cmd = "tc qdisc add dev {} parent 1:{} \ fq_codel".format(intf, qid) - filter_cmd = "sudo tc filter add dev {} protocol ip parent 1: prio 1 \ + filter_cmd = "tc filter add dev {} protocol ip parent 1: prio 1 \ handle {qid} fw flowid 1:{qid}".format(intf, qid=qid) - args = shlex.split(tc_cmd) + args = argSplit(tc_cmd) ret = subprocess.call(args) logging.debug("create class qid %s ret %d", qid, ret) - args = shlex.split(qdisc_cmd) + args = argSplit(qdisc_cmd) ret = subprocess.call(args) logging.debug("create qdisc ret %d", ret) - args = shlex.split(filter_cmd) + args = argSplit(filter_cmd) ret = 
subprocess.call(args) logging.debug("add filter ret %d", ret) @staticmethod def init_qdisc(intf: str) -> None: - qdisc_cmd = "sudo tc qdisc add dev {} root handle 1: htb".format(intf) - parent_q_cmd = "sudo tc class add dev {} parent 1: classid 1:fffe htb \ + qdisc_cmd = "tc qdisc add dev {} root handle 1: htb".format(intf) + parent_q_cmd = "tc class add dev {} parent 1: classid 1:fffe htb \ rate 1Gbit ceil 1Gbit".format(intf) - tc_cmd = "sudo tc class add dev {} parent 1:fffe classid 1:1 htb \ + tc_cmd = "tc class add dev {} parent 1:fffe classid 1:1 htb \ rate 12Kbit ceil 1Gbit".format(intf) - args = shlex.split(qdisc_cmd) + args = argSplit(qdisc_cmd) ret = subprocess.call(args) logging.debug("qdisc init ret %d", ret) - args = shlex.split(parent_q_cmd) + args = argSplit(parent_q_cmd) ret = subprocess.call(args) logging.debug("add class 1: ret %d", ret) - args = shlex.split(tc_cmd) + args = argSplit(tc_cmd) ret = subprocess.call(args) logging.debug("add class 1:fffe ret %d", ret) - class QosQueueMap: """ Creates/Deletes queues in linux. Using Qdiscs for flow based diff --git a/lte/gateway/python/magma/redirectd/main.py b/lte/gateway/python/magma/redirectd/main.py index 612607a93902..7bd5342b4014 100644 --- a/lte/gateway/python/magma/redirectd/main.py +++ b/lte/gateway/python/magma/redirectd/main.py @@ -13,22 +13,15 @@ from magma.common.service import MagmaService from magma.configuration.service_configs import get_service_config_value from magma.redirectd.redirect_server import run_flask -from magma.redirectd.scribe_logging import RedirectScribeLogger from lte.protos.mconfig import mconfigs_pb2 def main(): """ - main() for redirectd - - Initializes the scribe logger, starts the server threads + main() for redirectd. Starts the server threads. 
""" service = MagmaService('redirectd', mconfigs_pb2.RedirectD()) - scribe_logger = None - if service.config.get('scribe_logging_enabled', False): - scribe_logger = RedirectScribeLogger(service.loop) - redirect_ip = get_service_config_value( 'pipelined', 'bridge_ip_address', None, @@ -40,8 +33,7 @@ def main(): http_port = service.config['http_port'] exit_callback = get_exit_server_thread_callback(service) - run_server_thread( - run_flask, redirect_ip, http_port, scribe_logger, exit_callback) + run_server_thread(run_flask, redirect_ip, http_port, exit_callback) # Run the service loop service.run() @@ -57,11 +49,11 @@ def on_exit_server_thread(): return on_exit_server_thread -def run_server_thread(target, ip, port, scribe_logger, exit_callback): +def run_server_thread(target, ip, port, exit_callback): """ Start redirectd service server thread """ thread = threading.Thread( target=target, - args=(ip, port, scribe_logger, exit_callback)) + args=(ip, port, exit_callback)) thread.daemon = True thread.start() diff --git a/lte/gateway/python/magma/redirectd/redirect_server.py b/lte/gateway/python/magma/redirectd/redirect_server.py index 7cce2922506f..629160ddf9a2 100644 --- a/lte/gateway/python/magma/redirectd/redirect_server.py +++ b/lte/gateway/python/magma/redirectd/redirect_server.py @@ -32,10 +32,6 @@ def flask_redirect(**kwargs): response = kwargs['get_redirect_response'](request.remote_addr) redirect_info = RedirectInfo(request.remote_addr, response) - scribe_client = kwargs['scribe_client'] - if scribe_client is not None: - scribe_client.log_to_scribe(redirect_info) - logging.info( "Request from {}: sent http code {} - redirected to {}".format( redirect_info.subscriber_ip, response.http_code, @@ -52,7 +48,7 @@ def flask_redirect(**kwargs): return redirect(response.redirect_address, code=response.http_code) -def setup_flask_server(scribe_client): +def setup_flask_server(): app = Flask(__name__) url_dict = RedirectDict() @@ -73,24 +69,26 @@ def 
get_redirect_response(src_ip): return ServerResponse(redirect_addr, HTTP_REDIRECT) app.add_url_rule( - '/', 'index', flask_redirect, - defaults={'get_redirect_response': get_redirect_response, - 'scribe_client': scribe_client} + '/', + 'index', + flask_redirect, + defaults={'get_redirect_response': get_redirect_response}, ) app.add_url_rule( - '/', 'index', flask_redirect, - defaults={'get_redirect_response': get_redirect_response, - 'scribe_client': scribe_client} + '/', + 'index', + flask_redirect, + defaults={'get_redirect_response': get_redirect_response} ) return app -def run_flask(ip, port, scribe_logger, exit_callback): +def run_flask(ip, port, exit_callback): """ Runs the flask server, this is a daemon, so it exits when redirectd exits """ - app = setup_flask_server(scribe_logger) + app = setup_flask_server() server = wsgiserver.WSGIServer(app, host=ip, port=port) try: diff --git a/lte/gateway/python/magma/redirectd/scribe_logging.py b/lte/gateway/python/magma/redirectd/scribe_logging.py deleted file mode 100644 index 720c128e7938..000000000000 --- a/lte/gateway/python/magma/redirectd/scribe_logging.py +++ /dev/null @@ -1,42 +0,0 @@ -""" -Copyright (c) 2018-present, Facebook, Inc. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. An additional grant -of patent rights can be found in the PATENTS file in the same directory. 
-""" - -import datetime - -import snowflake -from orc8r.protos.logging_service_pb2 import LogEntry - -from magma.common.scribe_client import ScribeClient - - -class RedirectScribeLogger: - LOGGING_CATEGORY = "perfpipe_magma_redirectd_stats" - - def __init__(self, event_loop): - self._client = ScribeClient(loop=event_loop) - - def log_to_scribe(self, redirect_info): - self._client.log_to_scribe_with_sampling_rate( - [self.generate_redirect_log_entry(redirect_info)] - ) - - def generate_redirect_log_entry(self, redirect_info): - time = int(datetime.datetime.now().timestamp()) - hw_id = snowflake.snowflake() - int_map = {'server_response': redirect_info.server_response.http_code} - normal_map = { - 'subscriber_ip': redirect_info.subscriber_ip, - 'redirect_address': redirect_info.server_response.redirect_address - } - - return LogEntry(category=self.LOGGING_CATEGORY, - time=int(time), - hw_id=hw_id, - normal_map=normal_map, - int_map=int_map) diff --git a/lte/gateway/python/magma/redirectd/tests/test_redirect.py b/lte/gateway/python/magma/redirectd/tests/test_redirect.py index bd00d4da7064..a9ad06ae5f06 100644 --- a/lte/gateway/python/magma/redirectd/tests/test_redirect.py +++ b/lte/gateway/python/magma/redirectd/tests/test_redirect.py @@ -18,10 +18,9 @@ class RedirectdTest(unittest.TestCase): def setUp(self): """ - Sets up a test version of the redirect server, mocks scribe/url_dict + Sets up a test version of the redirect server, mocks url_dict """ - self._scribe_client = MagicMock() - app = setup_flask_server(self._scribe_client) + app = setup_flask_server() app.config['TESTING'] = True test_dict = { @@ -48,29 +47,15 @@ def get_resp(src_ip): def test_302_homepage(self): """ Assert 302 http response, proper reponse headers with new dest url - - Correct scribe logging """ resp = self.client.get('/', environ_base={'REMOTE_ADDR': '192.5.82.1'}) self.assertEqual(resp.status_code, HTTP_REDIRECT) self.assertEqual(resp.headers['Location'], 'http://www.example.com/') - 
self._scribe_client.log_to_scribe.assert_called_with( - RedirectInfo( - subscriber_ip='192.5.82.1', - server_response=ServerResponse( - redirect_address='http://www.example.com/', - http_code=HTTP_REDIRECT - ) - ) - ) - def test_302_with_path(self): """ Assert 302 http response, proper reponse headers with new dest url - - Correct scribe logging """ resp = self.client.get('/generate_204', environ_base={'REMOTE_ADDR': '192.5.82.1'}) @@ -78,31 +63,10 @@ def test_302_with_path(self): self.assertEqual(resp.status_code, HTTP_REDIRECT) self.assertEqual(resp.headers['Location'], 'http://www.example.com/') - self._scribe_client.log_to_scribe.assert_called_with( - RedirectInfo( - subscriber_ip='192.5.82.1', - server_response=ServerResponse( - redirect_address='http://www.example.com/', - http_code=HTTP_REDIRECT - ) - ) - ) - def test_404(self): """ Assert 404 http response - - Correct scribe logging """ resp = self.client.get('/', environ_base={'REMOTE_ADDR': '127.0.0.1'}) self.assertEqual(resp.status_code, HTTP_NOT_FOUND) - - self._scribe_client.log_to_scribe.assert_called_with( - RedirectInfo( - subscriber_ip='127.0.0.1', - server_response=ServerResponse( - redirect_address='404.html', http_code=404 - ) - ) - ) diff --git a/lte/gateway/python/magma/subscriberdb/processor.py b/lte/gateway/python/magma/subscriberdb/processor.py index 4ae9e77a5cef..dac51a742685 100644 --- a/lte/gateway/python/magma/subscriberdb/processor.py +++ b/lte/gateway/python/magma/subscriberdb/processor.py @@ -9,8 +9,11 @@ """ import abc -from lte.protos.subscriberdb_pb2 import GSMSubscription, LTESubscription, \ - SubscriberID +from lte.protos.subscriberdb_pb2 import ( + GSMSubscription, + LTESubscription, + SubscriberID, +) from magma.subscriberdb.sid import SIDUtils from .crypto.gsm import UnsafePreComputedA3A8 @@ -262,6 +265,18 @@ def set_next_lte_auth_seq(self, imsi, seq): with self._store.edit_subscriber(sid) as subs: subs.state.lte_auth_next_seq = seq + def get_sub_data(self, imsi): + """ + 
Returns the complete subscriber profile for subscriber. + Args: + imsi: IMSI string + Returns: + SubscriberData proto struct + """ + sid = SIDUtils.to_str(SubscriberID(id=imsi, type=SubscriberID.IMSI)) + sub_data = self._store.get_subscriber_data(sid) + return sub_data + @classmethod def seq_to_sqn(cls, seq, ind=0): """Compute the 48 bit SQN given a seq given the formula defined in diff --git a/lte/gateway/python/magma/subscriberdb/protocols/s6a_proxy_servicer.py b/lte/gateway/python/magma/subscriberdb/protocols/s6a_proxy_servicer.py index 172abeeb85e3..19817f39890c 100644 --- a/lte/gateway/python/magma/subscriberdb/protocols/s6a_proxy_servicer.py +++ b/lte/gateway/python/magma/subscriberdb/protocols/s6a_proxy_servicer.py @@ -91,6 +91,12 @@ def UpdateLocation(self, request, context): logging.warning('Subscriber not found for ULR: %s', e) return ula + try: + sub_data = self.lte_processor.get_sub_data(imsi) + except SubscriberNotFoundError as e: + ula.error_code = s6a_proxy_pb2.USER_UNKNOWN + logging.warning("Subscriber not found for ULR: %s", e) + return ula ula.error_code = s6a_proxy_pb2.SUCCESS ula.default_context_id = 0 ula.total_ambr.max_bandwidth_ul = profile.max_ul_bit_rate @@ -99,7 +105,7 @@ def UpdateLocation(self, request, context): apn = ula.apn.add() apn.context_id = 0 - apn.service_selection = 'oai.ipv4' + apn.service_selection = "oai.ipv4" apn.qos_profile.class_id = 9 apn.qos_profile.priority_level = 15 apn.qos_profile.preemption_capability = 1 @@ -109,32 +115,27 @@ def UpdateLocation(self, request, context): apn.ambr.max_bandwidth_dl = profile.max_dl_bit_rate apn.pdn = s6a_proxy_pb2.UpdateLocationAnswer.APNConfiguration.IPV4 - # TODO - Add APN config through CLI - # ims apn - apn_ims = ula.apn.add() - apn_ims.context_id = 1 - apn_ims.service_selection = 'ims' - apn_ims.qos_profile.class_id = 5 - apn_ims.qos_profile.priority_level = 15 - apn_ims.qos_profile.preemption_capability = 1 - apn_ims.qos_profile.preemption_vulnerability = 0 - - 
apn_ims.ambr.max_bandwidth_ul = profile.max_ul_bit_rate - apn_ims.ambr.max_bandwidth_dl = profile.max_dl_bit_rate - apn_ims.pdn = s6a_proxy_pb2.UpdateLocationAnswer.APNConfiguration.IPV4 - - # internet apn - apn_internet = ula.apn.add() - apn_internet.context_id = 2 - apn_internet.service_selection = 'internet' - apn_internet.qos_profile.class_id = 1 - apn_internet.qos_profile.priority_level = 5 - apn_internet.qos_profile.preemption_capability = 1 - apn_internet.qos_profile.preemption_vulnerability = 0 - - apn_internet.ambr.max_bandwidth_ul = profile.max_ul_bit_rate - apn_internet.ambr.max_bandwidth_dl = profile.max_dl_bit_rate - apn_internet.pdn = s6a_proxy_pb2.UpdateLocationAnswer.APNConfiguration.IPV4 - + # Secondary PDN + context_id = 0 + for apn in sub_data.non_3gpp.apn_config: + sec_apn = ula.apn.add() + # Context id 0 is assigned to oai.ipv4 apn. So start from 1 + sec_apn.context_id = context_id + 1 + context_id += 1 + sec_apn.service_selection = apn.service_selection + sec_apn.qos_profile.class_id = apn.qos_profile.class_id + sec_apn.qos_profile.priority_level = apn.qos_profile.priority_level + sec_apn.qos_profile.preemption_capability = ( + apn.qos_profile.preemption_capability + ) + sec_apn.qos_profile.preemption_vulnerability = ( + apn.qos_profile.preemption_vulnerability + ) + + sec_apn.ambr.max_bandwidth_ul = apn.ambr.max_bandwidth_ul + sec_apn.ambr.max_bandwidth_dl = apn.ambr.max_bandwidth_dl + sec_apn.pdn = ( + s6a_proxy_pb2.UpdateLocationAnswer.APNConfiguration.IPV4 + ) return ula diff --git a/lte/gateway/python/magma/subscriberdb/rpc_servicer.py b/lte/gateway/python/magma/subscriberdb/rpc_servicer.py index dae42f4f362b..671a6c1ba1fe 100644 --- a/lte/gateway/python/magma/subscriberdb/rpc_servicer.py +++ b/lte/gateway/python/magma/subscriberdb/rpc_servicer.py @@ -14,7 +14,10 @@ from magma.common.rpc_utils import return_void from magma.subscriberdb.sid import SIDUtils -from .store.base import DuplicateSubscriberError, SubscriberNotFoundError +from 
.store.base import ( + DuplicateSubscriberError, + SubscriberNotFoundError, +) class SubscriberDBRpcServicer(subscriberdb_pb2_grpc.SubscriberDBServicer): @@ -63,7 +66,7 @@ def UpdateSubscriber(self, request, context): """ sid = SIDUtils.to_str(request.data.sid) try: - with self._store.edit_subscriber(sid) as subs: + with self._store.edit_subscriber(sid, request) as subs: request.mask.MergeMessage(request.data, subs) except SubscriberNotFoundError: context.set_details('Subscriber not found: %s' % sid) diff --git a/lte/gateway/python/magma/subscriberdb/store/sqlite.py b/lte/gateway/python/magma/subscriberdb/store/sqlite.py index 91dc1a9a06db..56873f42c7d2 100644 --- a/lte/gateway/python/magma/subscriberdb/store/sqlite.py +++ b/lte/gateway/python/magma/subscriberdb/store/sqlite.py @@ -14,7 +14,11 @@ from lte.protos.subscriberdb_pb2 import SubscriberData from magma.subscriberdb.sid import SIDUtils -from .base import BaseStore, DuplicateSubscriberError, SubscriberNotFoundError +from .base import ( + BaseStore, + DuplicateSubscriberError, + SubscriberNotFoundError, +) from .onready import OnDataReady @@ -66,31 +70,45 @@ def add_subscriber(self, subscriber_data): self._on_ready.add_subscriber(subscriber_data) @contextmanager - def edit_subscriber(self, subscriber_id): + def edit_subscriber(self, subscriber_id, request=None): """ Context manager to modify the subscriber data. 
""" with self.conn: - res = self.conn.execute("SELECT data FROM subscriberdb WHERE " - "subscriber_id = ?", (subscriber_id, )) + res = self.conn.execute( + "SELECT data FROM subscriberdb WHERE " "subscriber_id = ?", + (subscriber_id,), + ) row = res.fetchone() if not row: raise SubscriberNotFoundError(subscriber_id) subscriber_data = SubscriberData() subscriber_data.ParseFromString(row[0]) + # Manually update APN config as MergeMessage() does not work on + # repeated field + if request and request.data.non_3gpp.apn_config: + # Delete the existing APN config/s in the subscriberdb and add + # new APN config received + del subscriber_data.non_3gpp.apn_config[:] + for apn in request.data.non_3gpp.apn_config: + apn_config = subscriber_data.non_3gpp.apn_config.add() + self._update_apn(apn_config, apn) yield subscriber_data data_str = subscriber_data.SerializeToString() - self.conn.execute("UPDATE subscriberdb SET data = ? " - "WHERE subscriber_id = ?", - (data_str, subscriber_id)) + self.conn.execute( + "UPDATE subscriberdb SET data = ? " "WHERE subscriber_id = ?", + (data_str, subscriber_id), + ) def delete_subscriber(self, subscriber_id): """ Method that deletes a subscriber, if present. """ with self.conn: - self.conn.execute("DELETE FROM subscriberdb WHERE " - "subscriber_id = ?", (subscriber_id, )) + self.conn.execute( + "DELETE FROM subscriberdb WHERE " "subscriber_id = ?", + (subscriber_id,), + ) def delete_all_subscribers(self): """ @@ -175,3 +193,21 @@ def resync(self, subscribers): def on_ready(self): return self._on_ready.event.wait() + + def _update_apn(self, apn_config, apn_data): + """ + Method that populates apn data. 
+ """ + apn_config.service_selection = apn_data.service_selection + apn_config.qos_profile.class_id = apn_data.qos_profile.class_id + apn_config.qos_profile.priority_level = ( + apn_data.qos_profile.priority_level + ) + apn_config.qos_profile.preemption_capability = ( + apn_data.qos_profile.preemption_capability + ) + apn_config.qos_profile.preemption_vulnerability = ( + apn_data.qos_profile.preemption_vulnerability + ) + apn_config.ambr.max_bandwidth_ul = apn_data.ambr.max_bandwidth_ul + apn_config.ambr.max_bandwidth_dl = apn_data.ambr.max_bandwidth_dl diff --git a/lte/gateway/python/scripts/subscriber_cli.py b/lte/gateway/python/scripts/subscriber_cli.py index ae188f7c28ca..839beca70155 100755 --- a/lte/gateway/python/scripts/subscriber_cli.py +++ b/lte/gateway/python/scripts/subscriber_cli.py @@ -11,8 +11,13 @@ import argparse -from lte.protos.subscriberdb_pb2 import GSMSubscription, LTESubscription, \ - SubscriberData, SubscriberState, SubscriberUpdate +from lte.protos.subscriberdb_pb2 import ( + GSMSubscription, + LTESubscription, + SubscriberData, + SubscriberState, + SubscriberUpdate, +) from lte.protos.subscriberdb_pb2_grpc import SubscriberDBStub from orc8r.protos.common_pb2 import Void @@ -42,10 +47,7 @@ def add_subscriber(client, args): lte.auth_opc = bytes.fromhex(args.lte_auth_opc) data = SubscriberData( - sid=SIDUtils.to_pb(args.sid), - gsm=gsm, - lte=lte, - state=state, + sid=SIDUtils.to_pb(args.sid), gsm=gsm, lte=lte, state=state, ) client.AddSubscriber(data) @@ -56,6 +58,7 @@ def update_subscriber(client, args): update.data.sid.CopyFrom(SIDUtils.to_pb(args.sid)) gsm = update.data.gsm lte = update.data.lte + non_3gpp = update.data.non_3gpp fields = update.mask.paths if len(args.gsm_auth_tuple) != 0: @@ -76,10 +79,43 @@ def update_subscriber(client, args): fields.append('state.lte_auth_next_seq') if args.lte_auth_opc is not None: + lte.state = LTESubscription.ACTIVE lte.auth_opc = bytes.fromhex(args.lte_auth_opc) fields.append('lte.state') 
fields.append('lte.auth_opc') + if args.apn_config is not None: + apn_name = "apn_name" + qci = "qci" + priority = "priority" + pre_cap = "preemption_capability" + pre_vul = "preemption_vulnerability" + ul = "mbr_uplink" + dl = "mbr_downlink" + apn_keys = (apn_name, qci, priority, pre_cap, pre_vul, ul, dl) + apn_data = args.apn_config + for apn_d in apn_data: + apn_val = apn_d.split(",") + if len(apn_val) != 7: + print( + "Incorrect APN parameters." + "Please check: subscriber_cli.py update -h" + ) + return + apn_dict = dict(zip(apn_keys, apn_val)) + apn_config = non_3gpp.apn_config.add() + apn_config.service_selection = apn_dict[apn_name] + apn_config.qos_profile.class_id = int(apn_dict[qci]) + apn_config.qos_profile.priority_level = int(apn_dict[priority]) + apn_config.qos_profile.preemption_capability = int( + apn_dict[pre_cap] + ) + apn_config.qos_profile.preemption_vulnerability = int( + apn_dict[pre_vul] + ) + apn_config.ambr.max_bandwidth_ul = int(apn_dict[ul]) + apn_config.ambr.max_bandwidth_dl = int(apn_dict[dl]) + client.UpdateSubscriber(update) @@ -109,25 +145,61 @@ def create_parser(): formatter_class=argparse.ArgumentDefaultsHelpFormatter) # Add subcommands - subparsers = parser.add_subparsers(title='subcommands', dest='cmd') - parser_add = subparsers.add_parser('add', help='Add a new subscriber') - parser_del = subparsers.add_parser('delete', help='Delete a subscriber') - parser_update = subparsers.add_parser('update', help='Update a subscriber') - parser_get = subparsers.add_parser('get', help='Get subscriber data') - parser_list = subparsers.add_parser('list', help='List all subscriber ids') + subparsers = parser.add_subparsers(title="subcommands", dest="cmd") + parser_add = subparsers.add_parser("add", help="Add a new subscriber") + parser_del = subparsers.add_parser("delete", help="Delete a subscriber") + parser_update = subparsers.add_parser("update", help="Update a subscriber") + parser_get = subparsers.add_parser("get", help="Get subscriber 
data") + parser_list = subparsers.add_parser("list", help="List all subscriber ids") # Add arguments - for cmd in [parser_add, parser_del, parser_update, parser_get]: - cmd.add_argument('sid', help='Subscriber identifier') - for cmd in [parser_add, parser_update]: - cmd.add_argument('--gsm-auth-tuple', default=[], action='append', - help='GSM authentication tuple (hex digits)') - cmd.add_argument('--lte-auth-key', help='LTE authentication key') - cmd.add_argument('--lte-auth-opc', help='LTE authentication opc') - cmd.add_argument('--lte-auth-next-seq', type=int, - help='LTE authentication seq number (hex digits)') - - # Add function callbacks + for cmd in [ + parser_add, + parser_del, + parser_update, + parser_get, + ]: + cmd.add_argument("sid", help="Subscriber identifier") + for cmd in [parser_add]: + cmd.add_argument( + "--gsm-auth-tuple", + default=[], + action="append", + help="GSM authentication tuple (hex digits)", + ) + cmd.add_argument("--lte-auth-key", help="LTE authentication key") + cmd.add_argument("--lte-auth-opc", help="LTE authentication opc") + cmd.add_argument( + "--lte-auth-next-seq", + type=int, + help="LTE authentication seq number (hex digits)", + ) + + for cmd in [parser_update]: + cmd.add_argument( + "--gsm-auth-tuple", + default=[], + action="append", + help="GSM authentication tuple (hex digits)", + ) + cmd.add_argument("--lte-auth-key", help="LTE authentication key") + cmd.add_argument("--lte-auth-opc", help="LTE authentication opc") + cmd.add_argument( + "--lte-auth-next-seq", + type=int, + help="LTE authentication seq number (hex digits)", + ) + cmd.add_argument( + "--apn-config", + action="append", + help="APN parameters to add/update in the order :" + " [apn-name, qci, priority, preemption-capability," + " preemption-vulnerability, mbr-ul, mbr-dl]" + " [e.g --apn-config ims,5,15,1,1,1000,2000 " + " --apn-config internet,9,1,0,0,3000,4000]", + ) + +# Add function callbacks parser_add.set_defaults(func=add_subscriber) 
parser_del.set_defaults(func=delete_subscriber) parser_update.set_defaults(func=update_subscriber) diff --git a/orc8r/cloud/configs/logger.yml b/orc8r/cloud/configs/logger.yml deleted file mode 100644 index 80515f51786b..000000000000 --- a/orc8r/cloud/configs/logger.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -# Copyright (c) 2016-present, Facebook, Inc. -# All rights reserved. -# -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. - -scribe_export_url: "http://localhost:8080" -scribe_app_id: "app_id" -scribe_app_secret: "app_secret" diff --git a/orc8r/cloud/deploy/terraform/orc8r-helm-aws/examples/basic/main.tf b/orc8r/cloud/deploy/terraform/orc8r-helm-aws/examples/basic/main.tf index 4ea14d4a3651..4b8d80b4178c 100644 --- a/orc8r/cloud/deploy/terraform/orc8r-helm-aws/examples/basic/main.tf +++ b/orc8r/cloud/deploy/terraform/orc8r-helm-aws/examples/basic/main.tf @@ -45,12 +45,12 @@ module orc8r-app { orc8r_db_host = module.orc8r.orc8r_db_host orc8r_db_name = module.orc8r.orc8r_db_name orc8r_db_user = module.orc8r.orc8r_db_user - orc8r_db_pass = module.orc8r.nms_db_pass + orc8r_db_pass = module.orc8r.orc8r_db_pass nms_db_host = module.orc8r.nms_db_host nms_db_name = module.orc8r.nms_db_name nms_db_user = module.orc8r.nms_db_user - nms_db_pass = module.orc8r.orc8r_db_pass + nms_db_pass = module.orc8r.nms_db_pass docker_registry = "registry.hub.docker.com/foobar" docker_user = "foobar" diff --git a/orc8r/cloud/docker/controller/supervisord.conf b/orc8r/cloud/docker/controller/supervisord.conf index 10068c6c590e..e8a0c16e1e56 100644 --- a/orc8r/cloud/docker/controller/supervisord.conf +++ b/orc8r/cloud/docker/controller/supervisord.conf @@ -1,6 +1,14 @@ [supervisord] nodaemon=true +[eventlistener:stdout] +command=python -m supervisor_logger +buffer_size=500 +events=PROCESS_LOG 
+result_handler=supervisor_logger:result_handler +stdout_logfile=NONE +stderr_logfile=NONE + [program:bootstrapper] command=/usr/bin/envdir /var/opt/magma/envdir /var/opt/magma/bin/bootstrapper -cak=/var/opt/magma/certs/bootstrapper.key -logtostderr=true -v=0 autorestart=true @@ -129,14 +137,6 @@ stderr_logfile=NONE stdout_events_enabled=true stderr_events_enabled=true -[eventlistener:stdout] -command=python -m supervisor_logger -buffer_size=500 -events=PROCESS_LOG -result_handler=supervisor_logger:result_handler -stdout_logfile=NONE -stderr_logfile=NONE - [program:device] command=/usr/bin/envdir /var/opt/magma/envdir /var/opt/magma/bin/device -logtostderr=true -v=0 stdout_logfile=/dev/fd/1 diff --git a/orc8r/cloud/go/blobstore/definitions.go b/orc8r/cloud/go/blobstore/definitions.go index 3953ba860b81..12586194e5d3 100644 --- a/orc8r/cloud/go/blobstore/definitions.go +++ b/orc8r/cloud/go/blobstore/definitions.go @@ -8,19 +8,9 @@ package blobstore -import ( - "os" -) - -func GetEnvWithDefault(variable string, defaultValue string) string { - value := os.Getenv(variable) - if len(value) == 0 { - value = defaultValue - } - return value -} +import "magma/orc8r/lib/go/definitions" var ( - SQLDriver = GetEnvWithDefault("SQL_DRIVER", "sqlite3") - DatabaseSource = GetEnvWithDefault("DATABASE_SOURCE", ":memory:") + SQLDriver = definitions.GetEnvWithDefault("SQL_DRIVER", "sqlite3") + DatabaseSource = definitions.GetEnvWithDefault("DATABASE_SOURCE", ":memory:") ) diff --git a/orc8r/cloud/go/blobstore/integration_test.go b/orc8r/cloud/go/blobstore/integration_test.go index 8f4973086b74..4fc2bdcdffc8 100644 --- a/orc8r/cloud/go/blobstore/integration_test.go +++ b/orc8r/cloud/go/blobstore/integration_test.go @@ -21,7 +21,7 @@ import ( ) func integration(t *testing.T, fact blobstore.BlobStorageFactory) { - // Check the contract for an empty datastore + // Check the contract for an empty data store err := fact.InitializeFactory() store, err := fact.StartTransaction(nil) 
assert.NoError(t, err) diff --git a/orc8r/cloud/go/blobstore/sql_test.go b/orc8r/cloud/go/blobstore/sql_test.go index 66ecb4b326d6..0ff67bdc7845 100644 --- a/orc8r/cloud/go/blobstore/sql_test.go +++ b/orc8r/cloud/go/blobstore/sql_test.go @@ -42,12 +42,12 @@ func TestSqlBlobStorage_ListKeys(t *testing.T) { setup: func(mock sqlmock.Sqlmock) { mock.ExpectQuery("SELECT \"key\" FROM network_table"). WithArgs("network", "type"). - WillReturnError(errors.New("Mock query error")) + WillReturnError(errors.New("mock query error")) }, run: func(store blobstore.TransactionalBlobStorage) (interface{}, error) { return store.ListKeys("network", "type") }, - expectedError: errors.New("Mock query error"), + expectedError: errors.New("mock query error"), expectedResult: nil, } @@ -103,14 +103,14 @@ func TestSqlBlobStorage_Get(t *testing.T) { "WHERE \\(\\(network_id = \\$1 AND type = \\$2 AND \"key\" = \\$3\\)\\)", ). WithArgs("network", "t3", "k3"). - WillReturnError(errors.New("Mock query error")) + WillReturnError(errors.New("mock query error")) }, run: func(store blobstore.TransactionalBlobStorage) (interface{}, error) { return store.Get("network", storage.TypeAndKey{Type: "t3", Key: "k3"}) }, - expectedError: errors.New("Mock query error"), + expectedError: errors.New("mock query error"), expectedResult: nil, } runCase(t, happyPath) @@ -149,14 +149,14 @@ func TestSqlBlobStorage_GetMany(t *testing.T) { setup: func(mock sqlmock.Sqlmock) { mock.ExpectQuery("SELECT type, \"key\", value, version FROM network_table"). WithArgs("network", "t1", "k1", "network", "t2", "k2"). 
- WillReturnError(errors.New("Mock query error")) + WillReturnError(errors.New("mock query error")) }, run: func(store blobstore.TransactionalBlobStorage) (interface{}, error) { return store.GetMany("network", []storage.TypeAndKey{{Type: "t1", Key: "k1"}, {Type: "t2", Key: "k2"}}) }, - expectedError: errors.New("Mock query error"), + expectedError: errors.New("mock query error"), expectedResult: []blobstore.Blob{}, } @@ -352,7 +352,7 @@ func TestSqlBlobStorage_CreateOrUpdate(t *testing.T) { updatePrepare := mock.ExpectPrepare("UPDATE network_table") updatePrepare.ExpectExec(). WithArgs([]byte("goodbye"), 43, "network", "t1", "k1"). - WillReturnError(errors.New("Mock query error")) + WillReturnError(errors.New("mock query error")) updatePrepare.WillBeClosed() }, @@ -367,7 +367,7 @@ func TestSqlBlobStorage_CreateOrUpdate(t *testing.T) { return nil, err }, - expectedError: errors.New("Error updating blob (network, t1, k1): Mock query error"), + expectedError: errors.New("Error updating blob (network, t1, k1): mock query error"), expectedResult: nil, } @@ -389,7 +389,7 @@ func TestSqlBlobStorage_CreateOrUpdate(t *testing.T) { mock.ExpectExec("INSERT INTO network_table"). WithArgs("network", "t2", "k2", []byte("world"), 1000). - WillReturnError(errors.New("Mock query error")) + WillReturnError(errors.New("mock query error")) }, run: func(store blobstore.TransactionalBlobStorage) (interface{}, error) { @@ -403,7 +403,7 @@ func TestSqlBlobStorage_CreateOrUpdate(t *testing.T) { return nil, err }, - expectedError: errors.New("error creating blobs: Mock query error"), + expectedError: errors.New("error creating blobs: mock query error"), expectedResult: nil, } @@ -435,7 +435,7 @@ func TestSqlBlobStorage_Delete(t *testing.T) { setup: func(mock sqlmock.Sqlmock) { mock.ExpectExec("DELETE FROM network_table"). WithArgs("network", "t1", "k1", "network", "t2", "k2"). 
- WillReturnError(errors.New("Mock query error")) + WillReturnError(errors.New("mock query error")) }, run: func(store blobstore.TransactionalBlobStorage) (interface{}, error) { @@ -443,7 +443,7 @@ func TestSqlBlobStorage_Delete(t *testing.T) { return nil, err }, - expectedError: errors.New("Mock query error"), + expectedError: errors.New("mock query error"), expectedResult: nil, } @@ -474,7 +474,7 @@ func TestSqlBlobStorage_IncrementVersion(t *testing.T) { } func TestSqlBlobStorage_Integration(t *testing.T) { - // Use an in-memory sqlite datastore + // Use an in-memory sqlite data store db, err := sqorc.Open("sqlite3", ":memory:") if err != nil { t.Fatalf("Could not initialize sqlite DB: %s", err) diff --git a/orc8r/cloud/go/clock/clock.go b/orc8r/cloud/go/clock/clock.go index ee2a234ded2f..0474a8c6c843 100644 --- a/orc8r/cloud/go/clock/clock.go +++ b/orc8r/cloud/go/clock/clock.go @@ -32,25 +32,13 @@ func SetAndFreezeClock(t *testing.T, ti time.Time) { // UnfreezeClock will revert clock.Now()'s behavior to delegating to time.Now() func UnfreezeClock(t *testing.T) { + r := recover() if t == nil { panic("nice try") } c = &DefaultClock{} -} - -// GetUnfreezeClockDeferFunc returns a function which is expected to be -// deferred in the same context as a call to SetAndFreezeClock. -// The returned function will, when deferred, always unfreeze the clock, even -// in the case of a panic in the outer scope. -// Don't forget to call the returned function! -// defer GetUnfreezeClockDeferFunc(t)() -func GetUnfreezeClockDeferFunc(t *testing.T) func() { - return func() { - r := recover() - UnfreezeClock(t) - if r != nil { - panic(r) - } + if r != nil { + panic(r) } } diff --git a/orc8r/cloud/go/datastore/datastore.go b/orc8r/cloud/go/datastore/datastore.go deleted file mode 100644 index 90a523aabf9b..000000000000 --- a/orc8r/cloud/go/datastore/datastore.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. 
- -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. -*/ - -// Datastore provides a key-value pair interface for the cloud services -package datastore - -import ( - "errors" - "strings" -) - -type ValueWrapper struct { - Value []byte - Generation uint64 -} - -// ErrNotFound is returned by GET when no record for the key is found -var ErrNotFound = errors.New("No record for query") - -func IsErrNotFound(err error) bool { - if strings.Contains(err.Error(), ErrNotFound.Error()) { - return true - } - return false -} - -type Api interface { - Put(table string, key string, value []byte) error - PutMany(table string, valuesToPut map[string][]byte) (map[string]error, error) - Get(table string, key string) ([]byte, uint64, error) - GetMany(table string, keys []string) (map[string]ValueWrapper, error) - Delete(table string, key string) error - DeleteMany(table string, keys []string) (map[string]error, error) - ListKeys(table string) ([]string, error) - DeleteTable(table string) error - DoesKeyExist(table string, key string) (bool, error) -} diff --git a/orc8r/cloud/go/datastore/definitions.go b/orc8r/cloud/go/datastore/definitions.go deleted file mode 100644 index f7d480dd0d0f..000000000000 --- a/orc8r/cloud/go/datastore/definitions.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
-*/ - -package datastore - -import ( - "os" -) - -func GetEnvWithDefault(variable string, defaultValue string) string { - value := os.Getenv(variable) - if len(value) == 0 { - value = defaultValue - } - return value -} - -var ( - SQL_DRIVER = GetEnvWithDefault("SQL_DRIVER", "sqlite3") - DATABASE_SOURCE = GetEnvWithDefault("DATABASE_SOURCE", ":memory:") -) diff --git a/orc8r/cloud/go/datastore/mocks/Api.go b/orc8r/cloud/go/datastore/mocks/Api.go deleted file mode 100644 index ee19bf7636a1..000000000000 --- a/orc8r/cloud/go/datastore/mocks/Api.go +++ /dev/null @@ -1,209 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. -*/ - -// To regenerate this when the interface changes, run -// mockery -name=Api in datastore/ - -// Code generated by mockery v1.0.0 -package mocks - -import ( - "magma/orc8r/cloud/go/datastore" - - "github.com/stretchr/testify/mock" -) - -// Api is an autogenerated mock type for the Api type -type Api struct { - mock.Mock -} - -// Delete provides a mock function with given fields: table, key -func (_m *Api) Delete(table string, key string) error { - ret := _m.Called(table, key) - - var r0 error - if rf, ok := ret.Get(0).(func(string, string) error); ok { - r0 = rf(table, key) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DeleteMany provides a mock function with given fields: table, keys -func (_m *Api) DeleteMany(table string, keys []string) (map[string]error, error) { - ret := _m.Called(table, keys) - - var r0 map[string]error - if rf, ok := ret.Get(0).(func(string, []string) map[string]error); ok { - r0 = rf(table, keys) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]error) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string, []string) error); ok { - r1 = rf(table, keys) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DeleteTable 
provides a mock function with given fields: table -func (_m *Api) DeleteTable(table string) error { - ret := _m.Called(table) - - var r0 error - if rf, ok := ret.Get(0).(func(string) error); ok { - r0 = rf(table) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DoesKeyExist provides a mock function with given fields: table, key -func (_m *Api) DoesKeyExist(table string, key string) (bool, error) { - ret := _m.Called(table, key) - - var r0 bool - if rf, ok := ret.Get(0).(func(string, string) bool); ok { - r0 = rf(table, key) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(table, key) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Get provides a mock function with given fields: table, key -func (_m *Api) Get(table string, key string) ([]byte, uint64, error) { - ret := _m.Called(table, key) - - var r0 []byte - if rf, ok := ret.Get(0).(func(string, string) []byte); ok { - r0 = rf(table, key) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 uint64 - if rf, ok := ret.Get(1).(func(string, string) uint64); ok { - r1 = rf(table, key) - } else { - r1 = ret.Get(1).(uint64) - } - - var r2 error - if rf, ok := ret.Get(2).(func(string, string) error); ok { - r2 = rf(table, key) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// GetMany provides a mock function with given fields: table, keys -func (_m *Api) GetMany(table string, keys []string) (map[string]datastore.ValueWrapper, error) { - ret := _m.Called(table, keys) - - var r0 map[string]datastore.ValueWrapper - if rf, ok := ret.Get(0).(func(string, []string) map[string]datastore.ValueWrapper); ok { - r0 = rf(table, keys) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]datastore.ValueWrapper) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string, []string) error); ok { - r1 = rf(table, keys) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - 
-// ListKeys provides a mock function with given fields: table -func (_m *Api) ListKeys(table string) ([]string, error) { - ret := _m.Called(table) - - var r0 []string - if rf, ok := ret.Get(0).(func(string) []string); ok { - r0 = rf(table) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]string) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(table) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Put provides a mock function with given fields: table, key, value -func (_m *Api) Put(table string, key string, value []byte) error { - ret := _m.Called(table, key, value) - - var r0 error - if rf, ok := ret.Get(0).(func(string, string, []byte) error); ok { - r0 = rf(table, key, value) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// PutMany provides a mock function with given fields: table, valuesToPut -func (_m *Api) PutMany(table string, valuesToPut map[string][]byte) (map[string]error, error) { - ret := _m.Called(table, valuesToPut) - - var r0 map[string]error - if rf, ok := ret.Get(0).(func(string, map[string][]byte) map[string]error); ok { - r0 = rf(table, valuesToPut) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]error) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string, map[string][]byte) error); ok { - r1 = rf(table, valuesToPut) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/orc8r/cloud/go/datastore/sql.go b/orc8r/cloud/go/datastore/sql.go deleted file mode 100644 index 70516ff3d7b7..000000000000 --- a/orc8r/cloud/go/datastore/sql.go +++ /dev/null @@ -1,303 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
-*/ - -package datastore - -import ( - "database/sql" - "fmt" - - "magma/orc8r/cloud/go/sqorc" - - sq "github.com/Masterminds/squirrel" - _ "github.com/go-sql-driver/mysql" - _ "github.com/lib/pq" - _ "github.com/mattn/go-sqlite3" - "github.com/pkg/errors" - "github.com/thoas/go-funk" -) - -const ( - // escaped for mysql compat - keyCol = "\"key\"" - valueCol = "value" - genCol = "generation_number" - deletedCol = "deleted" -) - -type SqlDb struct { - db *sql.DB - builder sqorc.StatementBuilder -} - -func NewSqlDb(driver string, source string, sqlBuilder sqorc.StatementBuilder) (*SqlDb, error) { - db, err := sqorc.Open(driver, source) - if err != nil { - return nil, err - } - - return &SqlDb{ - db: db, - builder: sqlBuilder, - }, nil -} - -func (store *SqlDb) getInitFn(table string) func(*sql.Tx) error { - return func(tx *sql.Tx) error { - _, err := store.builder.CreateTable(table). - IfNotExists(). - // table builder escapes all columns by default - Column(keyCol).Type(sqorc.ColumnTypeText).PrimaryKey().EndColumn(). - Column(valueCol).Type(sqorc.ColumnTypeBytes).EndColumn(). - Column(genCol).Type(sqorc.ColumnTypeInt).NotNull().Default(0).EndColumn(). - Column(deletedCol).Type(sqorc.ColumnTypeBool).NotNull().Default("FALSE").EndColumn(). - RunWith(tx). - Exec() - if err != nil { - return errors.Wrap(err, "failed to init table") - } - return nil - } -} - -func (store *SqlDb) Put(table string, key string, value []byte) error { - txFn := func(tx *sql.Tx) (interface{}, error) { - // Check if the data is already present and query for its generation number - var generationNumber uint64 - err := store.builder.Select(genCol). - From(table). - Where(sq.Eq{keyCol: key}). - RunWith(tx). - QueryRow(). - Scan(&generationNumber) - if err != nil && err != sql.ErrNoRows { - return nil, errors.Wrap(err, "failed to query for existing generation number") - } - - rowExists := err == nil - if rowExists { - return store.builder.Update(table). - Set(valueCol, value). 
- Set(genCol, generationNumber+1). - Where(sq.Eq{keyCol: key}). - RunWith(tx). - Exec() - } else { - return store.builder.Insert(table). - Columns(keyCol, valueCol). - Values(key, value). - RunWith(tx). - Exec() - } - } - _, err := sqorc.ExecInTx(store.db, store.getInitFn(table), txFn) - return err -} - -func (store *SqlDb) PutMany(table string, valuesToPut map[string][]byte) (map[string]error, error) { - txFn := func(tx *sql.Tx) (interface{}, error) { - ret := map[string]error{} - rowKeys := make([]string, len(valuesToPut)) - for k := range valuesToPut { - rowKeys = append(rowKeys, k) - } - - existingRows, err := store.getMany(tx, table, rowKeys) - if err != nil { - return ret, errors.Wrap(err, "failed to query for existing rows") - } - - rowsToUpdate := [][3]interface{}{} // (val, gen, key) - rowsToInsert := [][2]interface{}{} // (key, val) - for key, newValue := range valuesToPut { - if existingValue, keyExists := existingRows[key]; keyExists { - rowsToUpdate = append(rowsToUpdate, [3]interface{}{newValue, existingValue.Generation + 1, key}) - } else { - rowsToInsert = append(rowsToInsert, [2]interface{}{key, newValue}) - } - } - - // Let squirrel cache prepared statements for us on update - sc := sq.NewStmtCache(tx) - defer sqorc.ClearStatementCacheLogOnError(sc, "PutMany") - - // Update existing rows - for _, row := range rowsToUpdate { - _, err := store.builder.Update(table). - Set(valueCol, row[0]). - Set(genCol, row[1]). - Where(sq.Eq{keyCol: row[2]}). - RunWith(sc). - Exec() - if err != nil { - ret[row[2].(string)] = err - } - } - - // Insert fresh rows - if !funk.IsEmpty(rowsToInsert) { - insertBuilder := store.builder.Insert(table). 
- Columns(keyCol, valueCol) - for _, row := range rowsToInsert { - insertBuilder = insertBuilder.Values(row[0], row[1]) - } - _, err := insertBuilder.RunWith(tx).Exec() - if err != nil { - return ret, errors.Wrap(err, "failed to create new entries") - } - } - - if funk.IsEmpty(ret) { - return ret, nil - } else { - return ret, errors.New("failed to write entries, see return value for specific errors") - } - } - - ret, err := sqorc.ExecInTx(store.db, store.getInitFn(table), txFn) - return ret.(map[string]error), err -} - -func (store *SqlDb) Get(table string, key string) ([]byte, uint64, error) { - txFn := func(tx *sql.Tx) (interface{}, error) { - var value []byte - var generationNumber uint64 - err := store.builder.Select(valueCol, genCol). - From(table). - Where(sq.Eq{keyCol: key}). - RunWith(tx). - QueryRow().Scan(&value, &generationNumber) - if err == sql.ErrNoRows { - return ValueWrapper{}, ErrNotFound - } - return ValueWrapper{Value: value, Generation: generationNumber}, err - } - - ret, err := sqorc.ExecInTx(store.db, store.getInitFn(table), txFn) - if err != nil { - return nil, 0, err - } - vw := ret.(ValueWrapper) - return vw.Value, vw.Generation, nil -} - -func (store *SqlDb) GetMany(table string, keys []string) (map[string]ValueWrapper, error) { - txFn := func(tx *sql.Tx) (interface{}, error) { - return store.getMany(tx, table, keys) - } - ret, err := sqorc.ExecInTx(store.db, store.getInitFn(table), txFn) - return ret.(map[string]ValueWrapper), err -} - -func (store *SqlDb) Delete(table string, key string) error { - txFn := func(tx *sql.Tx) (interface{}, error) { - return store.builder.Delete(table).Where(sq.Eq{keyCol: key}).RunWith(tx).Exec() - } - _, err := sqorc.ExecInTx(store.db, store.getInitFn(table), txFn) - return err -} - -func (store *SqlDb) DeleteMany(table string, keys []string) (map[string]error, error) { - txFn := func(tx *sql.Tx) (interface{}, error) { - return store.builder.Delete(table).Where(sq.Eq{keyCol: keys}).RunWith(tx).Exec() - } - 
_, err := sqorc.ExecInTx(store.db, store.getInitFn(table), txFn) - return map[string]error{}, err -} - -func (store *SqlDb) ListKeys(table string) ([]string, error) { - txFn := func(tx *sql.Tx) (interface{}, error) { - rows, err := store.builder.Select(keyCol).From(table).RunWith(tx).Query() - if err != nil { - return []string{}, errors.Wrap(err, "failed to query for keys") - } - defer sqorc.CloseRowsLogOnError(rows, "ListKeys") - - keys := []string{} - for rows.Next() { - var key string - if err = rows.Scan(&key); err != nil { - return []string{}, errors.Wrap(err, "failed to read key") - } - keys = append(keys, key) - } - return keys, nil - } - - ret, err := sqorc.ExecInTx(store.db, store.getInitFn(table), txFn) - return ret.([]string), err -} - -func (store *SqlDb) DeleteTable(table string) error { - txFn := func(tx *sql.Tx) (interface{}, error) { - return tx.Exec(fmt.Sprintf("DROP TABLE IF EXISTS %s", table)) - } - // No initFn param because why would we create a table that we're dropping - _, err := sqorc.ExecInTx(store.db, func(*sql.Tx) error { return nil }, txFn) - return err -} - -func (store *SqlDb) DoesKeyExist(table string, key string) (bool, error) { - txFn := func(tx *sql.Tx) (interface{}, error) { - var placeHolder uint64 - err := store.builder.Select("1").From(table). - Where(sq.Eq{keyCol: key}). - Limit(1). - RunWith(tx). - QueryRow().Scan(&placeHolder) - if err != nil { - if err == sql.ErrNoRows { - return false, nil - } - return false, err - } - return true, nil - } - ret, err := sqorc.ExecInTx(store.db, store.getInitFn(table), txFn) - return ret.(bool), err -} - -func (store *SqlDb) getMany(tx *sql.Tx, table string, keys []string) (map[string]ValueWrapper, error) { - valuesByKey := map[string]ValueWrapper{} - if len(keys) == 0 { - return valuesByKey, nil - } - - rows, err := store.builder.Select(keyCol, valueCol, genCol). - From(table). - Where(sq.Eq{keyCol: keys}). - RunWith(tx). 
- Query() - if err != nil { - return valuesByKey, err - } - defer sqorc.CloseRowsLogOnError(rows, "getMany") - return getSqlRowsAsMap(rows) -} - -func getSqlRowsAsMap(rows *sql.Rows) (map[string]ValueWrapper, error) { - var valuesByKey = make(map[string]ValueWrapper) - - for rows.Next() { - var key string - var value []byte - var generationNumber uint64 - - err := rows.Scan(&key, &value, &generationNumber) - if err != nil { - return map[string]ValueWrapper{}, err - } - - valuesByKey[key] = ValueWrapper{ - Value: value, - Generation: generationNumber, - } - } - - return valuesByKey, nil -} diff --git a/orc8r/cloud/go/datastore/sql_test.go b/orc8r/cloud/go/datastore/sql_test.go deleted file mode 100644 index 54be33252605..000000000000 --- a/orc8r/cloud/go/datastore/sql_test.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
-*/ - -package datastore_test - -import ( - "testing" - - "magma/orc8r/cloud/go/datastore" - "magma/orc8r/cloud/go/sqorc" - - _ "github.com/mattn/go-sqlite3" - "github.com/stretchr/testify/assert" -) - -func TestDatastoreBasics(t *testing.T) { - key := "magic" - value := []byte("Hello world!") - table := "test_table" - // Create an in-memory sqlite datastore for testing - ds, err := datastore.NewSqlDb("sqlite3", ":memory:", sqorc.GetSqlBuilder()) - assert.NoError(t, err) - - // Add the key to the datastore - err = ds.Put(table, key, value) - assert.NoError(t, err) - - exists, err := ds.DoesKeyExist(table, key) - assert.NoError(t, err) - assert.True(t, exists) - exists, err = ds.DoesKeyExist(table, "SHOULD NOT EXIST") - assert.NoError(t, err) - assert.False(t, exists) - - res, _, err := ds.Get(table, key) - assert.NoError(t, err) - assert.Equal(t, value, res) - - keys, err := ds.ListKeys(table) - assert.NoError(t, err) - assert.Equal(t, []string{key}, keys) - - // Delete the key and check the datastore - err = ds.Delete(table, key) - assert.NoError(t, err) - _, _, err = ds.Get(table, key) - assert.Error(t, err) // key missing now - - keys, err = ds.ListKeys(table) - assert.NoError(t, err) - assert.Equal(t, []string{}, keys) - - err = ds.DeleteTable(table) - assert.NoError(t, err) -} - -func TestDatastoreBulkOperations(t *testing.T) { - ds, err := datastore.NewSqlDb("sqlite3", ":memory:", sqorc.GetSqlBuilder()) - assert.NoError(t, err) - - // Bulk insert KV's, no updates - valuesToPut := map[string][]byte{ - "key1": []byte("value1"), - "key2": []byte("value2"), - } - - expectedFailedKeys := make(map[string]error, 0) - failedKeys, err := ds.PutMany("test", valuesToPut) - assert.NoError(t, err) - assert.Equal(t, expectedFailedKeys, failedKeys) - - dbRows, err := ds.GetMany("test", []string{}) - assert.NoError(t, err) - assert.Equal(t, map[string]datastore.ValueWrapper{}, dbRows) - - dbRows, err = ds.GetMany("test", []string{"key1", "key2"}) - assert.NoError(t, err) - 
expectedDbRows := map[string]datastore.ValueWrapper{ - "key1": { - Value: []byte("value1"), - Generation: 0, - }, - "key2": { - Value: []byte("value2"), - Generation: 0, - }, - } - assert.Equal(t, expectedDbRows, dbRows) - - // PutAll with 1 update and 1 insert - valuesToPut = map[string][]byte{ - "key2": []byte("newvalue2"), - "key3": []byte("value3"), - } - failedKeys, err = ds.PutMany("test", valuesToPut) - assert.NoError(t, err) - assert.Equal(t, expectedFailedKeys, failedKeys) - - dbRows, err = ds.GetMany("test", []string{"key1", "key2", "key3"}) - assert.NoError(t, err) - expectedDbRows = map[string]datastore.ValueWrapper{ - "key1": { - Value: []byte("value1"), - Generation: 0, - }, - "key2": { - Value: []byte("newvalue2"), - Generation: 1, - }, - "key3": { - Value: []byte("value3"), - Generation: 0, - }, - } - assert.Equal(t, expectedDbRows, dbRows) - - // Empty PutAll - failedKeys, err = ds.PutMany("test", map[string][]byte{}) - assert.NoError(t, err) - assert.Equal(t, expectedFailedKeys, failedKeys) - - dbRows, err = ds.GetMany("test", []string{"key1", "key2", "key3"}) - assert.NoError(t, err) - assert.Equal(t, expectedDbRows, dbRows) - - // Empty GetAll - emptyDbRows, err := ds.GetMany("test", []string{}) - assert.NoError(t, err) - assert.Equal(t, map[string]datastore.ValueWrapper{}, emptyDbRows) - - // Delete many - failedKeys, err = ds.DeleteMany("test", []string{"key1", "key2"}) - assert.NoError(t, err) - assert.Equal(t, expectedFailedKeys, failedKeys) - expectedDbRows = map[string]datastore.ValueWrapper{ - "key3": { - Value: []byte("value3"), - Generation: 0, - }, - } - dbRows, err = ds.GetMany("test", []string{"key1", "key2", "key3"}) - assert.NoError(t, err) - assert.Equal(t, expectedDbRows, dbRows) - -} diff --git a/orc8r/cloud/go/datastore/sync_store.go b/orc8r/cloud/go/datastore/sync_store.go deleted file mode 100644 index fc78c99426eb..000000000000 --- a/orc8r/cloud/go/datastore/sync_store.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright (c) 
Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. -*/ - -package datastore - -import "sync" - -type SyncStore struct { - store Api - lock *sync.Mutex -} - -func NewSyncStore(store Api) *SyncStore { - return &SyncStore{store, &sync.Mutex{}} -} - -func (s *SyncStore) Put(table string, key string, value []byte) error { - s.lock.Lock() - defer s.lock.Unlock() - return s.store.Put(table, key, value) -} - -func (s *SyncStore) PutMany(table string, valuesToPut map[string][]byte) (map[string]error, error) { - s.lock.Lock() - defer s.lock.Unlock() - return s.store.PutMany(table, valuesToPut) -} - -func (s *SyncStore) Get(table string, key string) ([]byte, uint64, error) { - s.lock.Lock() - defer s.lock.Unlock() - return s.store.Get(table, key) -} - -func (s *SyncStore) GetMany(table string, keys []string) (map[string]ValueWrapper, error) { - s.lock.Lock() - defer s.lock.Unlock() - return s.store.GetMany(table, keys) -} - -func (s *SyncStore) Delete(table string, key string) error { - s.lock.Lock() - defer s.lock.Unlock() - return s.store.Delete(table, key) -} - -func (s *SyncStore) DeleteMany(table string, keys []string) (map[string]error, error) { - s.lock.Lock() - defer s.lock.Unlock() - return s.store.DeleteMany(table, keys) -} - -func (s *SyncStore) ListKeys(table string) ([]string, error) { - s.lock.Lock() - defer s.lock.Unlock() - return s.store.ListKeys(table) -} - -func (s *SyncStore) DeleteTable(table string) error { - s.lock.Lock() - defer s.lock.Unlock() - return s.store.DeleteTable(table) -} diff --git a/orc8r/cloud/go/datastore/utils.go b/orc8r/cloud/go/datastore/utils.go deleted file mode 100644 index 94728c72935e..000000000000 --- a/orc8r/cloud/go/datastore/utils.go +++ /dev/null @@ -1,15 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. 
- -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. -*/ - -package datastore - -// We create a table for each network for all serivces. -// This utility function provides the table name to use. -func GetTableName(networkId string, store string) string { - return networkId + "_" + store -} diff --git a/orc8r/cloud/go/go.mod b/orc8r/cloud/go/go.mod index 40b01110fa5f..326e1d825174 100644 --- a/orc8r/cloud/go/go.mod +++ b/orc8r/cloud/go/go.mod @@ -33,6 +33,7 @@ require ( github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f // indirect github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/protobuf v1.3.3 + github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect github.com/google/uuid v1.1.1 github.com/gorilla/handlers v1.4.0 // indirect github.com/hpcloud/tail v1.0.0 @@ -42,6 +43,8 @@ require ( github.com/lib/pq v1.2.0 github.com/mattn/go-sqlite3 v1.11.0 github.com/olivere/elastic/v7 v7.0.6 + github.com/onsi/ginkgo v1.7.0 // indirect + github.com/onsi/gomega v1.4.3 // indirect github.com/pkg/errors v0.8.1 github.com/prometheus/alertmanager v0.17.0 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 @@ -58,6 +61,7 @@ require ( golang.org/x/lint v0.0.0-20190409202823-959b441ac422 golang.org/x/net v0.0.0-20200202094626-16171245cfb2 golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a + google.golang.org/api v0.3.1 // indirect google.golang.org/grpc v1.27.1 gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 gopkg.in/yaml.v2 v2.2.8 diff --git a/orc8r/cloud/go/obsidian/obsidian/main.go b/orc8r/cloud/go/obsidian/obsidian/main.go index af5530f230aa..759b5e105539 100644 --- a/orc8r/cloud/go/obsidian/obsidian/main.go +++ b/orc8r/cloud/go/obsidian/obsidian/main.go @@ -11,8 +11,8 @@ package main import ( "flag" "log" + "magma/orc8r/lib/go/definitions" - "magma/orc8r/cloud/go/datastore" "magma/orc8r/cloud/go/obsidian" 
"magma/orc8r/cloud/go/obsidian/server" "magma/orc8r/cloud/go/orc8r" @@ -27,17 +27,17 @@ func main() { flag.BoolVar(&obsidian.TLS, "tls", false, "HTTPS only access") flag.StringVar( &obsidian.ServerCertPemPath, "cert", - datastore.GetEnvWithDefault("REST_CERT", obsidian.DefaultServerCert), + definitions.GetEnvWithDefault("REST_CERT", obsidian.DefaultServerCert), "Server's certificate PEM file", ) flag.StringVar( &obsidian.ServerKeyPemPath, "cert_key", - datastore.GetEnvWithDefault("REST_CERT_KEY", obsidian.DefaultServerCertKey), + definitions.GetEnvWithDefault("REST_CERT_KEY", obsidian.DefaultServerCertKey), "Server's certificate private key PEM file", ) flag.StringVar( &obsidian.ClientCAPoolPath, "client_ca", - datastore.GetEnvWithDefault("REST_CLIENT_CERT", obsidian.DefaultClientCAs), + definitions.GetEnvWithDefault("REST_CLIENT_CERT", obsidian.DefaultClientCAs), "Client certificate CA pool PEM file", ) flag.BoolVar( diff --git a/orc8r/cloud/go/pluginimpl/handlers/gateway_handlers_test.go b/orc8r/cloud/go/pluginimpl/handlers/gateway_handlers_test.go index 7bb40317459f..6f85ad10a769 100644 --- a/orc8r/cloud/go/pluginimpl/handlers/gateway_handlers_test.go +++ b/orc8r/cloud/go/pluginimpl/handlers/gateway_handlers_test.go @@ -80,7 +80,7 @@ func TestListGateways(t *testing.T) { // add device and state to g1 clock.SetAndFreezeClock(t, time.Unix(1000000, 0)) - defer clock.GetUnfreezeClockDeferFunc(t)() + defer clock.UnfreezeClock(t) gatewayRecord := &models.GatewayDevice{HardwareID: "hw1", Key: &models.ChallengeKey{KeyType: "ECHO"}} err = device.RegisterDevice("n1", orc8r.AccessGatewayRecordType, "hw1", gatewayRecord) assert.NoError(t, err) @@ -313,7 +313,7 @@ func TestGetGateway(t *testing.T) { _ = plugin.RegisterPluginForTests(t, &pluginimpl.BaseOrchestratorPlugin{}) clock.SetAndFreezeClock(t, time.Unix(1000000, 0)) - defer clock.GetUnfreezeClockDeferFunc(t)() + defer clock.UnfreezeClock(t) test_init.StartTestService(t) deviceTestInit.StartTestService(t) @@ -630,7 
+630,7 @@ func TestGetPartialReadHandlers(t *testing.T) { _ = plugin.RegisterPluginForTests(t, &pluginimpl.BaseOrchestratorPlugin{}) clock.SetAndFreezeClock(t, time.Unix(1000000, 0)) - defer clock.GetUnfreezeClockDeferFunc(t)() + defer clock.UnfreezeClock(t) test_init.StartTestService(t) deviceTestInit.StartTestService(t) diff --git a/orc8r/cloud/go/service/service.go b/orc8r/cloud/go/service/service.go index 42ed93306629..92e58093805e 100644 --- a/orc8r/cloud/go/service/service.go +++ b/orc8r/cloud/go/service/service.go @@ -27,7 +27,7 @@ import ( "github.com/golang/glog" "google.golang.org/grpc" "google.golang.org/grpc/encoding" - grpc_proto "google.golang.org/grpc/encoding/proto" + grpcProto "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/keepalive" ) @@ -122,7 +122,7 @@ func newServiceWithOptionsImpl(moduleName string, serviceName string, serverOpti // Check if service was started with print-grpc-payload flag or MAGMA_PRINT_GRPC_PAYLOAD env is set if printGrpcPayload || util.IsTruthyEnv(PrintGrpcPayloadEnv) { - ls := logCodec{encoding.GetCodec(grpc_proto.Name)} + ls := logCodec{encoding.GetCodec(grpcProto.Name)} if ls.protoCodec != nil { glog.Errorf("Adding Debug Codec for service %s", serviceName) encoding.RegisterCodec(ls) @@ -157,13 +157,13 @@ func newServiceWithOptionsImpl(moduleName string, serviceName string, serverOpti func (service *Service) Run() error { port, err := registry.GetServicePort(service.Type) if err != nil { - return fmt.Errorf("Failed to get service port: %s", err) + return fmt.Errorf("failed to get service port: %s", err) } // Create the server socket for gRPC lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) if err != nil { - return fmt.Errorf("Failed to listen on port %d: %s", port, err) + return fmt.Errorf("failed to listen on port %d: %s", port, err) } service.State = protos.ServiceInfo_ALIVE service.Health = protos.ServiceInfo_APP_HEALTHY @@ -172,10 +172,13 @@ func (service *Service) Run() error { // Run the test 
service on a given Listener. This function blocks // by a signal or until the gRPC server is stopped. -func (service *Service) RunTest(lis net.Listener) error { +func (service *Service) RunTest(lis net.Listener) { service.State = protos.ServiceInfo_ALIVE service.Health = protos.ServiceInfo_APP_HEALTHY - return service.GrpcServer.Serve(lis) + err := service.GrpcServer.Serve(lis) + if err != nil { + glog.Fatal("Failed to run test service") + } } // GetDefaultKeepaliveParameters returns the default keepalive server parameters. diff --git a/orc8r/cloud/go/services/accessd/accessd/main.go b/orc8r/cloud/go/services/accessd/accessd/main.go index 45144b141e12..59a1884608bd 100644 --- a/orc8r/cloud/go/services/accessd/accessd/main.go +++ b/orc8r/cloud/go/services/accessd/accessd/main.go @@ -11,9 +11,7 @@ LICENSE file in the root directory of this source tree. package main import ( - "log" - - "magma/orc8r/cloud/go/datastore" + "magma/orc8r/cloud/go/blobstore" "magma/orc8r/cloud/go/orc8r" "magma/orc8r/cloud/go/service" "magma/orc8r/cloud/go/services/accessd" @@ -21,30 +19,36 @@ import ( "magma/orc8r/cloud/go/services/accessd/servicers" "magma/orc8r/cloud/go/services/accessd/storage" "magma/orc8r/cloud/go/sqorc" + + "github.com/golang/glog" ) func main() { // Create the service srv, err := service.NewOrchestratorService(orc8r.ModuleName, accessd.ServiceName) if err != nil { - log.Fatalf("Error creating service: %s", err) + glog.Fatalf("Error creating service: %s", err) } - // Init the Datastore - ds, err := - datastore.NewSqlDb(datastore.SQL_DRIVER, datastore.DATABASE_SOURCE, sqorc.GetSqlBuilder()) + // Init storage + db, err := sqorc.Open(blobstore.SQLDriver, blobstore.DatabaseSource) + if err != nil { + glog.Fatalf("Failed to connect to database: %s", err) + } + fact := blobstore.NewEntStorage(storage.AccessdTableBlobstore, db, sqorc.GetSqlBuilder()) + err = fact.InitializeFactory() if err != nil { - log.Fatalf("Failed to initialize datastore: %s", err) + glog.Fatalf("Error 
initializing accessd database: %s", err) } + store := storage.NewAccessdBlobstore(fact) // Add servicers to the service - accessdStore := storage.NewAccessdDatastore(ds) - accessdServer := servicers.NewAccessdServer(accessdStore) + accessdServer := servicers.NewAccessdServer(store) protos.RegisterAccessControlManagerServer(srv.GrpcServer, accessdServer) // Run the service err = srv.Run() if err != nil { - log.Fatalf("Error running service: %s", err) + glog.Fatalf("Error running service: %s", err) } } diff --git a/orc8r/cloud/go/services/accessd/accessd_test.go b/orc8r/cloud/go/services/accessd/accessd_test.go index 06e66fbde3c3..843828485d9e 100644 --- a/orc8r/cloud/go/services/accessd/accessd_test.go +++ b/orc8r/cloud/go/services/accessd/accessd_test.go @@ -16,13 +16,13 @@ import ( "magma/orc8r/cloud/go/identity" "magma/orc8r/cloud/go/services/accessd" accessprotos "magma/orc8r/cloud/go/services/accessd/protos" - accessd_test_service "magma/orc8r/cloud/go/services/accessd/test_init" + accessdTestService "magma/orc8r/cloud/go/services/accessd/test_init" "magma/orc8r/lib/go/protos" ) func TestAccessManager(t *testing.T) { - accessd_test_service.StartTestService(t) + accessdTestService.StartTestService(t) op1 := identity.NewOperator("operator1") assert.NotEmpty(t, op1.ToCommonName()) diff --git a/orc8r/cloud/go/services/accessd/obsidian/handlers/accessd_handlers_test.go b/orc8r/cloud/go/services/accessd/obsidian/handlers/accessd_handlers_test.go index d3bbd69229ff..587cd09440f6 100644 --- a/orc8r/cloud/go/services/accessd/obsidian/handlers/accessd_handlers_test.go +++ b/orc8r/cloud/go/services/accessd/obsidian/handlers/accessd_handlers_test.go @@ -21,13 +21,13 @@ import ( "github.com/stretchr/testify/assert" "magma/orc8r/cloud/go/obsidian/access" - access_tests "magma/orc8r/cloud/go/obsidian/access/tests" + accessTests "magma/orc8r/cloud/go/obsidian/access/tests" "magma/orc8r/cloud/go/services/accessd/obsidian/handlers" 
"magma/orc8r/cloud/go/services/accessd/obsidian/models" "magma/orc8r/cloud/go/services/accessd/test_init" "magma/orc8r/cloud/go/test_utils" - security_cert "magma/orc8r/lib/go/security/cert" - certifier_test_utils "magma/orc8r/lib/go/security/csr" + securityCert "magma/orc8r/lib/go/security/cert" + certifierTestUtils "magma/orc8r/lib/go/security/csr" ) const ( @@ -42,7 +42,7 @@ const ( func testInit(t *testing.T) (string, map[models.OperatorID]models.Certificate, map[models.OperatorID]models.ACLType) { test_init.StartTestService(t) - testOperatorSerialNumber := access_tests.StartMockAccessControl(t, testAdminOperatorID) + testOperatorSerialNumber := accessTests.StartMockAccessControl(t, testAdminOperatorID) certificates := make(map[models.OperatorID]models.Certificate) acls := make(map[models.OperatorID]models.ACLType) initializeOperators(t, certificates, acls) @@ -50,10 +50,8 @@ func testInit(t *testing.T) (string, map[models.OperatorID]models.Certificate, m } func cleanup(t *testing.T) { - err := test_utils.GetMockDatastoreInstance().DeleteTable("access_control") - assert.NoError(t, err) - err = test_utils.GetMockDatastoreInstance().DeleteTable("certificate_info_db") - assert.NoError(t, err) + test_utils.DropTableFromSharedTestDB(t, "access_control_blobstore") + test_utils.DropTableFromSharedTestDB(t, "certificate_info_blobstore") } func TestListOperators(t *testing.T) { @@ -96,7 +94,7 @@ func TestGetOperatorsDetail(t *testing.T) { } e := echo.New() req := httptest.NewRequest(echo.GET, "/", nil) - req.Header.Set(access.CLIENT_CERT_SN_KEY, string(testOperatorSN)) + req.Header.Set(access.CLIENT_CERT_SN_KEY, testOperatorSN) rec := httptest.NewRecorder() c := e.NewContext(req, rec) c.SetParamNames("operator_id") @@ -153,7 +151,7 @@ func TestDeleteOperators(t *testing.T) { testOperatorSN, _, _ := testInit(t) e := echo.New() req := httptest.NewRequest(echo.DELETE, "/", nil) - req.Header.Set(access.CLIENT_CERT_SN_KEY, string(testOperatorSN)) + 
req.Header.Set(access.CLIENT_CERT_SN_KEY, testOperatorSN) rec := httptest.NewRecorder() c := e.NewContext(req, rec) c.SetParamNames("operator_id") @@ -163,7 +161,7 @@ func TestDeleteOperators(t *testing.T) { assert.Equal(t, http.StatusNoContent, rec.Code) req = httptest.NewRequest(echo.GET, "/", nil) - req.Header.Set(access.CLIENT_CERT_SN_KEY, string(testOperatorSN)) + req.Header.Set(access.CLIENT_CERT_SN_KEY, testOperatorSN) rec = httptest.NewRecorder() c = e.NewContext(req, rec) c.SetParamNames("operator_id") @@ -278,7 +276,7 @@ func TestPutPermissions(t *testing.T) { assert.NoError(t, err) e := echo.New() req := httptest.NewRequest(echo.PUT, "/", strings.NewReader(string(newPermissionsBytes))) - req.Header.Set(access.CLIENT_CERT_SN_KEY, string(testOperatorSN)) + req.Header.Set(access.CLIENT_CERT_SN_KEY, testOperatorSN) req.Header.Set(echo.HeaderContentType, echo.MIMEApplicationJSON) rec := httptest.NewRecorder() c := e.NewContext(req, rec) @@ -290,7 +288,7 @@ func TestPutPermissions(t *testing.T) { // Check that permissions were updated properly req = httptest.NewRequest(echo.GET, "/", nil) - req.Header.Set(access.CLIENT_CERT_SN_KEY, string(testOperatorSN)) + req.Header.Set(access.CLIENT_CERT_SN_KEY, testOperatorSN) rec = httptest.NewRecorder() c = e.NewContext(req, rec) c.SetParamNames("operator_id", "network_id") @@ -329,7 +327,7 @@ func TestGetCertificate(t *testing.T) { func TestPostDeleteCertificate(t *testing.T) { defer cleanup(t) testOperatorSN, certificates, _ := testInit(t) - csr, err := certifier_test_utils.CreateCSR( + csr, err := certifierTestUtils.CreateCSR( time.Duration(int64(time.Hour*24*365)), string(operator1ID), string(operator1ID), @@ -402,9 +400,9 @@ func assertOperatorRecordResponse(t *testing.T, expectedRecord *models.OperatorR } func certToSerialNumber(t *testing.T, certificate models.Certificate) models.CertificateSn { - cert, err := x509.ParseCertificate([]byte(certificate)) + cert, err := x509.ParseCertificate(certificate) 
assert.NoError(t, err) - return models.CertificateSn(security_cert.SerialToString(cert.SerialNumber)) + return models.CertificateSn(securityCert.SerialToString(cert.SerialNumber)) } func getOperatorSetFromJSON(jsonstr []byte) (map[string]bool, error) { @@ -429,7 +427,7 @@ func initializeOperators(t *testing.T, certificates map[models.OperatorID]models } func initializeOperator1(t *testing.T, certificates map[models.OperatorID]models.Certificate, acls map[models.OperatorID]models.ACLType) { - operator1CSR, err := certifier_test_utils.CreateCSR( + operator1CSR, err := certifierTestUtils.CreateCSR( time.Duration(int64(time.Hour*24*365)), string(operator1ID), string(operator1ID), @@ -463,7 +461,7 @@ func initializeOperator1(t *testing.T, certificates map[models.OperatorID]models } func initializeOperator2(t *testing.T, certificates map[models.OperatorID]models.Certificate, acls map[models.OperatorID]models.ACLType) { - operator2CSR, err := certifier_test_utils.CreateCSR( + operator2CSR, err := certifierTestUtils.CreateCSR( time.Duration(int64(time.Hour*24*365)), string(operator2ID), string(operator2ID), @@ -505,7 +503,7 @@ func initializeOperator2(t *testing.T, certificates map[models.OperatorID]models } func initializeAdminOperator(t *testing.T, certificates map[models.OperatorID]models.Certificate, acls map[models.OperatorID]models.ACLType) { - adminCSR, err := certifier_test_utils.CreateCSR( + adminCSR, err := certifierTestUtils.CreateCSR( time.Duration(int64(time.Hour*24*365)), string(adminID), string(adminID), @@ -548,5 +546,5 @@ func createOperator(t *testing.T, createRecord models.CreateOperatorRecord) mode assert.NoError(t, err) assert.Equal(t, http.StatusCreated, rec.Code) response := rec.Body.String() - return models.Certificate([]byte(response)) + return []byte(response) } diff --git a/orc8r/cloud/go/services/accessd/storage/storage_datastore.go b/orc8r/cloud/go/services/accessd/storage/storage_datastore.go deleted file mode 100644 index 
1f384a0128e7..000000000000 --- a/orc8r/cloud/go/services/accessd/storage/storage_datastore.go +++ /dev/null @@ -1,158 +0,0 @@ -/* - Copyright (c) Facebook, Inc. and its affiliates. - All rights reserved. - - This source code is licensed under the BSD-style license found in the - LICENSE file in the root directory of this source tree. -*/ - -package storage - -import ( - "magma/orc8r/cloud/go/datastore" - accessprotos "magma/orc8r/cloud/go/services/accessd/protos" - "magma/orc8r/lib/go/protos" - - "github.com/golang/protobuf/proto" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -const ( - accessdTableDatastore = "access_control" -) - -type accessdDatastore struct { - store datastore.Api -} - -// NewAccessdDatastore returns an initialized instance of accessdDatastore as AccessdStorage. -func NewAccessdDatastore(store datastore.Api) AccessdStorage { - return &accessdDatastore{store: store} -} - -func (a *accessdDatastore) ListAllIdentity() ([]*protos.Identity, error) { - idHashes, err := a.store.ListKeys(accessdTableDatastore) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to list keys: %s", err) - } - - marshaledACLs, err := a.store.GetMany(accessdTableDatastore, idHashes) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to get many acls: %s", err) - } - - var ret []*protos.Identity - for _, mACLWrapper := range marshaledACLs { - acl := &accessprotos.AccessControl_List{} - err = proto.Unmarshal(mACLWrapper.Value, acl) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to unmarshal acl: %s", err) - } - ret = append(ret, acl.Operator) - } - - return ret, nil -} - -func (a *accessdDatastore) GetACL(id *protos.Identity) (*accessprotos.AccessControl_List, error) { - acls, err := a.GetManyACL([]*protos.Identity{id}) - if err != nil { - return nil, err - } - for _, acl := range acls { - return acl, nil - } - return nil, status.Errorf(codes.NotFound, "get ACL error for Operator %s: %s", 
id.HashString(), err) -} - -func (a *accessdDatastore) GetManyACL(ids []*protos.Identity) ([]*accessprotos.AccessControl_List, error) { - if ids == nil { - return nil, status.Error(codes.InvalidArgument, "nil Identity list") - } - - idHashes := make([]string, 0, len(ids)) - for _, id := range ids { - if id == nil { - return nil, status.Error(codes.InvalidArgument, "nil Identity") - } - idHashes = append(idHashes, id.HashString()) - } - marshaledACLs, err := a.store.GetMany(accessdTableDatastore, idHashes) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to get many acls: %s", err) - } - - var ret []*accessprotos.AccessControl_List - for _, mACLWrapper := range marshaledACLs { - acl := &accessprotos.AccessControl_List{} - err = proto.Unmarshal(mACLWrapper.Value, acl) - if err != nil { - return nil, status.Errorf(codes.Internal, "failed to unmarshal acl: %s", err) - } - ret = append(ret, acl) - } - - return ret, nil -} - -func (a *accessdDatastore) PutACL(id *protos.Identity, acl *accessprotos.AccessControl_List) error { - if id == nil { - return status.Error(codes.InvalidArgument, "nil Identity") - } - if acl == nil { - return status.Error(codes.InvalidArgument, "nil AccessControl_List") - } - - marshaledACL, err := proto.Marshal(acl) - if err != nil { - return status.Errorf(codes.Internal, "failed to marshal acl: %s", err) - } - - err = a.store.Put(accessdTableDatastore, id.HashString(), marshaledACL) - if err != nil { - return status.Errorf(codes.Internal, "failed to put acl: %s", err) - } - - return nil -} - -// NOTE: datastore-implemented UpdateACLWithEntities is not atomic. 
-func (a *accessdDatastore) UpdateACLWithEntities(id *protos.Identity, entities []*accessprotos.AccessControl_Entity) error { - if id == nil { - return status.Error(codes.InvalidArgument, "nil Identity") - } - if entities == nil { - return status.Error(codes.InvalidArgument, "nil AccessControl_Entity slice") - } - - acl, err := a.GetACL(id) - if err != nil { - return err - } - - err = accessprotos.AddToACL(acl, entities) - if err != nil { - return err - } - - err = a.PutACL(id, acl) - if err != nil { - return err - } - - return nil -} - -func (a *accessdDatastore) DeleteACL(id *protos.Identity) error { - if id == nil { - return status.Error(codes.InvalidArgument, "nil Identity") - } - - err := a.store.Delete(accessdTableDatastore, id.HashString()) - if err != nil { - return status.Errorf(codes.Internal, "failed to delete acl: %s", err) - } - - return nil -} diff --git a/orc8r/cloud/go/services/accessd/storage/storage_integ_test.go b/orc8r/cloud/go/services/accessd/storage/storage_integ_test.go index 02b5d8d1a0a1..a43d8876698d 100644 --- a/orc8r/cloud/go/services/accessd/storage/storage_integ_test.go +++ b/orc8r/cloud/go/services/accessd/storage/storage_integ_test.go @@ -12,7 +12,6 @@ import ( "testing" "magma/orc8r/cloud/go/blobstore" - "magma/orc8r/cloud/go/datastore" "magma/orc8r/cloud/go/identity" accessprotos "magma/orc8r/cloud/go/services/accessd/protos" "magma/orc8r/cloud/go/services/accessd/storage" @@ -34,13 +33,6 @@ func TestAccessdStorageBlobstore_Integation(t *testing.T) { testAccessdStorageImpl(t, store) } -func TestAccessdStorageDatastore_Integation(t *testing.T) { - ds, err := datastore.NewSqlDb("sqlite3", ":memory:", sqorc.GetSqlBuilder()) - assert.NoError(t, err) - store := storage.NewAccessdDatastore(ds) - testAccessdStorageImpl(t, store) -} - func testAccessdStorageImpl(t *testing.T, store storage.AccessdStorage) { ids := []*protos.Identity{ identity.NewOperator("test_operator_0"), @@ -104,6 +96,7 @@ func testAccessdStorageImpl(t *testing.T, store 
storage.AccessdStorage) { err = store.DeleteACL(ids[0]) assert.NoError(t, err) _, err = store.GetACL(ids[0]) + assert.Error(t, err) assert.Contains(t, err.Error(), "NotFound") aclsRecvd, err = store.GetManyACL(ids[0:2]) assert.NoError(t, err) diff --git a/orc8r/cloud/go/services/accessd/test_init/start.go b/orc8r/cloud/go/services/accessd/test_init/start.go index 1723bb972a78..a479f1cf9e86 100644 --- a/orc8r/cloud/go/services/accessd/test_init/start.go +++ b/orc8r/cloud/go/services/accessd/test_init/start.go @@ -21,10 +21,10 @@ import ( func StartTestService(t *testing.T) { srv, lis := test_utils.NewTestService(t, orc8r.ModuleName, accessd.ServiceName) - ds := test_utils.GetMockDatastoreInstance() - accessdStore := storage.NewAccessdDatastore(ds) + store := test_utils.NewEntStorage(t, storage.AccessdTableBlobstore) + accessdStore := storage.NewAccessdBlobstore(store) protos.RegisterAccessControlManagerServer( srv.GrpcServer, servicers.NewAccessdServer(accessdStore)) - go srv.GrpcServer.Serve(lis) + go srv.RunTest(lis) } diff --git a/orc8r/cloud/go/services/certifier/certifier/main.go b/orc8r/cloud/go/services/certifier/certifier/main.go index 375373ec72a9..4ece27b26117 100644 --- a/orc8r/cloud/go/services/certifier/certifier/main.go +++ b/orc8r/cloud/go/services/certifier/certifier/main.go @@ -10,10 +10,9 @@ package main import ( "flag" - "log" "time" - "magma/orc8r/cloud/go/datastore" + "magma/orc8r/cloud/go/blobstore" "magma/orc8r/cloud/go/orc8r" "magma/orc8r/cloud/go/service" "magma/orc8r/cloud/go/services/certifier" @@ -42,20 +41,26 @@ func main() { // Create the service, flag will be parsed inside this function srv, err := service.NewOrchestratorService(orc8r.ModuleName, certifier.ServiceName) if err != nil { - log.Fatalf("Error creating service: %s", err) + glog.Fatalf("Error creating service: %s", err) } - // Init the datastore - store, err := datastore.NewSqlDb(datastore.SQL_DRIVER, datastore.DATABASE_SOURCE, sqorc.GetSqlBuilder()) + // Init storage + db, 
err := sqorc.Open(blobstore.SQLDriver, blobstore.DatabaseSource) if err != nil { - log.Fatalf("Failed to initialize datastore: %s", err) + glog.Fatalf("Failed to connect to database: %s", err) } - caMap := map[protos.CertType]*servicers.CAInfo{} + fact := blobstore.NewEntStorage(storage.CertifierTableBlobstore, db, sqorc.GetSqlBuilder()) + err = fact.InitializeFactory() + if err != nil { + glog.Fatalf("Error initializing certifier database: %s", err) + } + store := storage.NewCertifierBlobstore(fact) // Add servicers to the service + caMap := map[protos.CertType]*servicers.CAInfo{} bootstrapCert, bootstrapPrivKey, err := cert.LoadCertAndPrivKey(*bootstrapCACertFile, *bootstrapCAKeyFile) if err != nil { - log.Printf("ERROR: Failed to load bootstrap CA cert and key: %v", err) + glog.Infof("ERROR: Failed to load bootstrap CA cert and key: %v", err) } else { caMap[protos.CertType_DEFAULT] = &servicers.CAInfo{Cert: bootstrapCert, PrivKey: bootstrapPrivKey} } @@ -63,17 +68,18 @@ func main() { if vpnErr != nil { fmtstr := "ERROR: Failed to load VPN cert and key: %v" if err != nil { - log.Fatalf(fmtstr, vpnErr) + glog.Fatalf(fmtstr, vpnErr) } else { - log.Printf(fmtstr, vpnErr) + glog.Infof(fmtstr, vpnErr) } } else { caMap[protos.CertType_VPN] = &servicers.CAInfo{Cert: vpnCert, PrivKey: vpnPrivKey} } - certStore := storage.NewCertifierDatastore(store) - servicer, err := servicers.NewCertifierServer(certStore, caMap) + + // Register servicer + servicer, err := servicers.NewCertifierServer(store, caMap) if err != nil { - log.Fatalf("Failed to create certifier server: %s", err) + glog.Fatalf("Failed to create certifier server: %s", err) } certprotos.RegisterCertifierServer(srv.GrpcServer, servicer) @@ -81,7 +87,7 @@ func main() { gc := time.Tick(time.Hour * time.Duration(*gcHours)) go func() { for now := range gc { - log.Printf("%v - Removing Stale Certificates", now) + glog.Infof("%v - Removing Stale Certificates", now) _, err := servicer.CollectGarbage(context.Background(), 
&protos.Void{}) if err != nil { glog.Errorf("error collecting garbage for certifier: %s", err) @@ -92,6 +98,6 @@ func main() { // Run the service err = srv.Run() if err != nil { - log.Fatalf("Error running service: %s", err) + glog.Fatalf("Error running service: %s", err) } } diff --git a/orc8r/cloud/go/services/certifier/client_test.go b/orc8r/cloud/go/services/certifier/client_api_test.go similarity index 100% rename from orc8r/cloud/go/services/certifier/client_test.go rename to orc8r/cloud/go/services/certifier/client_api_test.go diff --git a/orc8r/cloud/go/services/certifier/servicers/certifier_test.go b/orc8r/cloud/go/services/certifier/servicers/certifier_test.go index 37c7c253d402..0e04ec8bb772 100644 --- a/orc8r/cloud/go/services/certifier/servicers/certifier_test.go +++ b/orc8r/cloud/go/services/certifier/servicers/certifier_test.go @@ -14,15 +14,15 @@ import ( "time" "magma/orc8r/cloud/go/blobstore" - "magma/orc8r/cloud/go/datastore" "magma/orc8r/cloud/go/services/certifier/servicers" "magma/orc8r/cloud/go/services/certifier/storage" "magma/orc8r/cloud/go/sqorc" "magma/orc8r/lib/go/protos" - certifier_test_utils "magma/orc8r/lib/go/security/csr" + certifierTestUtils "magma/orc8r/lib/go/security/csr" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" + _ "github.com/mattn/go-sqlite3" "github.com/stretchr/testify/assert" "golang.org/x/net/context" ) @@ -37,17 +37,10 @@ func TestCertifierBlobstore(t *testing.T) { testCertifierImpl(t, store) } -func TestCertifierDatastore(t *testing.T) { - ds, err := datastore.NewSqlDb("sqlite3", ":memory:", sqorc.GetSqlBuilder()) - assert.NoError(t, err) - store := storage.NewCertifierDatastore(ds) - testCertifierImpl(t, store) -} - func testCertifierImpl(t *testing.T, store storage.CertifierStorage) { ctx := context.Background() - caCert, caKey, err := certifier_test_utils.CreateSignedCertAndPrivKey(time.Hour * 24 * 10) + caCert, caKey, err := certifierTestUtils.CreateSignedCertAndPrivKey(time.Hour * 24 
* 10) assert.NoError(t, err) // just test with default @@ -58,7 +51,7 @@ func testCertifierImpl(t *testing.T, store storage.CertifierStorage) { assert.NoError(t, err) // sign and add - csrMsg, err := certifier_test_utils.CreateCSR(time.Duration(time.Hour*24*10), "cn", "cn") + csrMsg, err := certifierTestUtils.CreateCSR(time.Duration(time.Hour*24*10), "cn", "cn") assert.NoError(t, err) certMsg, err := srv.SignAddCertificate(ctx, csrMsg) assert.NoError(t, err) @@ -85,7 +78,7 @@ func testCertifierImpl(t *testing.T, store storage.CertifierStorage) { assert.Error(t, err) // test expiration - csrMsg, err = certifier_test_utils.CreateCSR(0, "cn", "cn") + csrMsg, err = certifierTestUtils.CreateCSR(0, "cn", "cn") assert.NoError(t, err) certMsg, err = srv.SignAddCertificate(ctx, csrMsg) assert.NoError(t, err) @@ -98,7 +91,7 @@ func testCertifierImpl(t *testing.T, store storage.CertifierStorage) { servicers.CollectGarbageAfter = time.Duration(0) for i := 0; i < 3; i++ { - csrMsg, err = certifier_test_utils.CreateCSR(0, "cn", "cn") + csrMsg, err = certifierTestUtils.CreateCSR(0, "cn", "cn") assert.NoError(t, err) _, err = srv.SignAddCertificate(ctx, csrMsg) assert.NoError(t, err) @@ -110,7 +103,7 @@ func testCertifierImpl(t *testing.T, store storage.CertifierStorage) { assert.Equal(t, 0, len(allSns)) // test csr longer than cert - csrMsg, err = certifier_test_utils.CreateCSR(time.Duration(time.Hour*24*100), "cn", "cn") + csrMsg, err = certifierTestUtils.CreateCSR(time.Duration(time.Hour*24*100), "cn", "cn") assert.NoError(t, err) certMsg, err = srv.SignAddCertificate(ctx, csrMsg) assert.NoError(t, err) @@ -120,13 +113,13 @@ func testCertifierImpl(t *testing.T, store storage.CertifierStorage) { assert.True(t, notAfter.Equal(caCert.NotAfter)) // test CN mismatch - csrMsg, err = certifier_test_utils.CreateCSR(time.Duration(time.Hour*1), "cn", "nc") + csrMsg, err = certifierTestUtils.CreateCSR(time.Duration(time.Hour*1), "cn", "nc") assert.NoError(t, err) certMsg, err = 
srv.SignAddCertificate(ctx, csrMsg) assert.Error(t, err) // test CN onverwrite - csrMsg, err = certifier_test_utils.CreateCSR(time.Duration(time.Hour*1), "", "cn") + csrMsg, err = certifierTestUtils.CreateCSR(time.Duration(time.Hour*1), "", "cn") assert.NoError(t, err) certMsg, err = srv.SignAddCertificate(ctx, csrMsg) assert.NoError(t, err) diff --git a/orc8r/cloud/go/services/certifier/storage/storage_datastore.go b/orc8r/cloud/go/services/certifier/storage/storage_datastore.go deleted file mode 100644 index 4ca78367bd47..000000000000 --- a/orc8r/cloud/go/services/certifier/storage/storage_datastore.go +++ /dev/null @@ -1,93 +0,0 @@ -/* - Copyright (c) Facebook, Inc. and its affiliates. - All rights reserved. - - This source code is licensed under the BSD-style license found in the - LICENSE file in the root directory of this source tree. -*/ - -package storage - -import ( - "magma/orc8r/cloud/go/datastore" - "magma/orc8r/cloud/go/services/certifier/protos" - merrors "magma/orc8r/lib/go/errors" - - "github.com/golang/protobuf/proto" - "github.com/pkg/errors" -) - -const ( - // Certificate info is stored in a dedicated table used by datastore.Api. - CertifierTableDatastore = "certificate_info_db" -) - -type certifierDatastore struct { - store datastore.Api -} - -// NewCertifierDatastore returns an initialized instance of certifierDatastore as CertifierStorage. 
-func NewCertifierDatastore(store datastore.Api) CertifierStorage { - return &certifierDatastore{store: store} -} - -func (c *certifierDatastore) ListSerialNumbers() ([]string, error) { - return c.store.ListKeys(CertifierTableDatastore) -} - -func (c *certifierDatastore) GetCertInfo(serialNumber string) (*protos.CertificateInfo, error) { - infos, err := c.GetManyCertInfo([]string{serialNumber}) - if err != nil { - return nil, err - } - for _, info := range infos { - return info, nil - } - return nil, merrors.ErrNotFound -} - -func (c *certifierDatastore) GetManyCertInfo(serialNumbers []string) (map[string]*protos.CertificateInfo, error) { - marshaledInfos, err := c.store.GetMany(CertifierTableDatastore, serialNumbers) - if err != nil { - return nil, errors.Wrap(err, "failed to get many certificate info") - } - - ret := make(map[string]*protos.CertificateInfo) - for sn, mInfoWrapper := range marshaledInfos { - info := &protos.CertificateInfo{} - err = proto.Unmarshal(mInfoWrapper.Value, info) - if err != nil { - return nil, errors.Wrap(err, "failed to unmarshal cert info") - } - ret[sn] = info - } - - return ret, nil -} - -// NOTE: datastore GetAllCertInfo doesn't execute in a single commit. 
-func (c *certifierDatastore) GetAllCertInfo() (map[string]*protos.CertificateInfo, error) { - serialNumbers, err := c.ListSerialNumbers() - if err != nil { - return nil, errors.Wrap(err, "failed to list serial numbers") - } - return c.GetManyCertInfo(serialNumbers) -} - -func (c *certifierDatastore) PutCertInfo(serialNumber string, certInfo *protos.CertificateInfo) error { - marshaledInfo, err := proto.Marshal(certInfo) - if err != nil { - return errors.Wrap(err, "failed to marshal cert info") - } - - err = c.store.Put(CertifierTableDatastore, serialNumber, marshaledInfo) - if err != nil { - return errors.Wrap(err, "failed to put certificate info") - } - - return nil -} - -func (c *certifierDatastore) DeleteCertInfo(serialNumber string) error { - return c.store.Delete(CertifierTableDatastore, serialNumber) -} diff --git a/orc8r/cloud/go/services/certifier/storage/storage_integ_test.go b/orc8r/cloud/go/services/certifier/storage/storage_integ_test.go index b16c84c8d33c..ddc92f66fe32 100644 --- a/orc8r/cloud/go/services/certifier/storage/storage_integ_test.go +++ b/orc8r/cloud/go/services/certifier/storage/storage_integ_test.go @@ -12,7 +12,6 @@ import ( "testing" "magma/orc8r/cloud/go/blobstore" - "magma/orc8r/cloud/go/datastore" "magma/orc8r/cloud/go/services/certifier/protos" "magma/orc8r/cloud/go/services/certifier/storage" "magma/orc8r/cloud/go/sqorc" @@ -34,13 +33,6 @@ func TestCertifierStorageBlobstore_Integation(t *testing.T) { testCertifierStorageImpl(t, store) } -func TestCertifierStorageDatastore_Integation(t *testing.T) { - ds, err := datastore.NewSqlDb("sqlite3", ":memory:", sqorc.GetSqlBuilder()) - assert.NoError(t, err) - store := storage.NewCertifierDatastore(ds) - testCertifierStorageImpl(t, store) -} - func testCertifierStorageImpl(t *testing.T, store storage.CertifierStorage) { sn0 := "serial_number_0" sn1 := "serial_number_1" diff --git a/orc8r/cloud/go/services/certifier/test_init/test_service_init.go 
b/orc8r/cloud/go/services/certifier/test_init/test_service_init.go index 3ea46b9cc088..a374cf8bd019 100644 --- a/orc8r/cloud/go/services/certifier/test_init/test_service_init.go +++ b/orc8r/cloud/go/services/certifier/test_init/test_service_init.go @@ -19,29 +19,27 @@ import ( "magma/orc8r/cloud/go/services/certifier/storage" "magma/orc8r/cloud/go/test_utils" "magma/orc8r/lib/go/protos" - certifier_test_utils "magma/orc8r/lib/go/security/csr" + certifierTestUtils "magma/orc8r/lib/go/security/csr" ) func StartTestService(t *testing.T) { caMap := map[protos.CertType]*servicers.CAInfo{} - bootstrapCert, bootstrapKey, err := certifier_test_utils.CreateSignedCertAndPrivKey( - time.Duration(time.Hour * 24 * 10)) + bootstrapCert, bootstrapKey, err := certifierTestUtils.CreateSignedCertAndPrivKey(time.Hour * 24 * 10) if err != nil { t.Fatalf("Failed to create bootstrap certifier certificate: %s", err) } else { - caMap[protos.CertType_DEFAULT] = &servicers.CAInfo{bootstrapCert, bootstrapKey} + caMap[protos.CertType_DEFAULT] = &servicers.CAInfo{Cert: bootstrapCert, PrivKey: bootstrapKey} } - vpnCert, vpnKey, err := certifier_test_utils.CreateSignedCertAndPrivKey( - time.Duration(time.Hour * 24 * 10)) + vpnCert, vpnKey, err := certifierTestUtils.CreateSignedCertAndPrivKey(time.Hour * 24 * 10) if err != nil { t.Fatalf("Failed to create VPN certifier certificate: %s", err) } else { - caMap[protos.CertType_VPN] = &servicers.CAInfo{vpnCert, vpnKey} + caMap[protos.CertType_VPN] = &servicers.CAInfo{Cert: vpnCert, PrivKey: vpnKey} } - ds := test_utils.GetMockDatastoreInstance() - certStore := storage.NewCertifierDatastore(ds) + store := test_utils.NewEntStorage(t, storage.CertifierTableBlobstore) + certStore := storage.NewCertifierBlobstore(store) certServer, err := servicers.NewCertifierServer(certStore, caMap) if err != nil { t.Fatalf("Failed to create certifier server: %s", err) diff --git a/orc8r/cloud/go/services/configurator/client_api.go 
b/orc8r/cloud/go/services/configurator/client_api.go index 8c6a3d0de35c..2a6d163094b8 100644 --- a/orc8r/cloud/go/services/configurator/client_api.go +++ b/orc8r/cloud/go/services/configurator/client_api.go @@ -222,6 +222,8 @@ func LoadNetworksByType(typeVal string, loadMetadata bool, loadConfigs bool) ([] return ret, nil } +// LoadNetwork loads the network identified by the network ID. +// If not found, returns ErrNotFound from magma/orc8r/lib/go/errors. func LoadNetwork(networkID string, loadMetadata bool, loadConfigs bool) (Network, error) { networks, _, err := LoadNetworks([]string{networkID}, loadMetadata, loadConfigs) if err != nil { @@ -233,7 +235,8 @@ func LoadNetwork(networkID string, loadMetadata bool, loadConfigs bool) (Network return networks[0], nil } -// LoadNetworkConfig loads network config of type configType registered under the networkID +// LoadNetworkConfig loads network config of type configType registered under the network ID. +// If not found, returns ErrNotFound from magma/orc8r/lib/go/errors. func LoadNetworkConfig(networkID, configType string) (interface{}, error) { network, err := LoadNetwork(networkID, false, true) if err != nil { @@ -464,21 +467,14 @@ func DeleteInternalEntity(entityType, entityKey string) error { return DeleteEntity(storage.InternalNetworkID, entityType, entityKey) } -// GetPhysicalIDOfEntity gets the physicalID associated with the entity -// identified by (networkID, entityType, entityKey) +// GetPhysicalIDOfEntity gets the physicalID associated with the entity identified by (networkID, entityType, entityKey). +// If not found, returns ErrNotFound from magma/orc8r/lib/go/errors. 
func GetPhysicalIDOfEntity(networkID, entityType, entityKey string) (string, error) { - entities, _, err := LoadEntities( - networkID, - nil, nil, nil, - []storage2.TypeAndKey{ - {Type: entityType, Key: entityKey}, - }, - EntityLoadCriteria{}, - ) - if err != nil || len(entities) != 1 { + entity, err := LoadEntity(networkID, entityType, entityKey, EntityLoadCriteria{}) + if err != nil { return "", err } - return entities[0].PhysicalID, nil + return entity.PhysicalID, nil } // ListEntityKeys returns all keys for an entity type in a network. @@ -514,6 +510,8 @@ func ListInternalEntityKeys(entityType string) ([]string, error) { return ListEntityKeys(storage.InternalNetworkID, entityType) } +// LoadEntity loads the network entity identified by (network ID, entity type, entity key). +// If not found, returns ErrNotFound from magma/orc8r/lib/go/errors. func LoadEntity(networkID string, entityType string, entityKey string, criteria EntityLoadCriteria) (NetworkEntity, error) { ret := NetworkEntity{} loaded, notFound, err := LoadEntities( @@ -531,6 +529,8 @@ func LoadEntity(networkID string, entityType string, entityKey string, criteria return loaded[0], nil } +// LoadEntityConfig loads the config for the entity identified by (network ID, entity type, entity key). +// If not found, returns ErrNotFound from magma/orc8r/lib/go/errors. func LoadEntityConfig(networkID, entityType, entityKey string) (interface{}, error) { entity, err := LoadEntity(networkID, entityType, entityKey, EntityLoadCriteria{LoadConfig: true}) if err != nil { @@ -542,6 +542,8 @@ func LoadEntityConfig(networkID, entityType, entityKey string) (interface{}, err return entity.Config, nil } +// LoadEntityForPhysicalID loads the network entity identified by the physical ID. +// If not found, returns ErrNotFound from magma/orc8r/lib/go/errors. 
func LoadEntityForPhysicalID(physicalID string, criteria EntityLoadCriteria) (NetworkEntity, error) { ret := NetworkEntity{} loaded, _, err := LoadEntities( @@ -561,6 +563,8 @@ func LoadEntityForPhysicalID(physicalID string, criteria EntityLoadCriteria) (Ne return loaded[0], nil } +// GetNetworkAndEntityIDForPhysicalID gets the network ID and entity ID for the associated physical ID. +// If not found, returns ErrNotFound from magma/orc8r/lib/go/errors. func GetNetworkAndEntityIDForPhysicalID(physicalID string) (string, string, error) { if len(physicalID) == 0 { return "", "", errors.New("Empty Hardware ID") @@ -616,7 +620,8 @@ func LoadEntities( return ret, entIDsToTKs(resp.EntitiesNotFound), nil } -// LoadInternalEntity calls LoadEntity with the internal networkID +// LoadInternalEntity calls LoadEntity with the internal network ID. +// If not found, returns ErrNotFound from magma/orc8r/lib/go/errors. func LoadInternalEntity(entityType string, entityKey string, criteria EntityLoadCriteria) (NetworkEntity, error) { return LoadEntity(storage.InternalNetworkID, entityType, entityKey, criteria) } diff --git a/orc8r/cloud/go/services/configurator/client_test.go b/orc8r/cloud/go/services/configurator/client_api_test.go similarity index 100% rename from orc8r/cloud/go/services/configurator/client_test.go rename to orc8r/cloud/go/services/configurator/client_api_test.go diff --git a/orc8r/cloud/go/services/configurator/configurator/main.go b/orc8r/cloud/go/services/configurator/configurator/main.go index 910147a25a0e..bbd5fb149591 100644 --- a/orc8r/cloud/go/services/configurator/configurator/main.go +++ b/orc8r/cloud/go/services/configurator/configurator/main.go @@ -14,7 +14,7 @@ and meta data for the network and network entity structures. 
package main import ( - "magma/orc8r/cloud/go/datastore" + "magma/orc8r/cloud/go/blobstore" "magma/orc8r/cloud/go/orc8r" "magma/orc8r/cloud/go/service" "magma/orc8r/cloud/go/services/configurator" @@ -32,7 +32,7 @@ func main() { if err != nil { glog.Fatalf("Error creating service: %s", err) } - db, err := sqorc.Open(datastore.SQL_DRIVER, datastore.DATABASE_SOURCE) + db, err := sqorc.Open(blobstore.SQLDriver, blobstore.DatabaseSource) if err != nil { glog.Fatalf("Failed to connect to database: %s", err) } diff --git a/orc8r/cloud/go/services/configurator/storage/sql_integ_test.go b/orc8r/cloud/go/services/configurator/storage/sql_integ_test.go index 5b304f250686..996a538bb09d 100644 --- a/orc8r/cloud/go/services/configurator/storage/sql_integ_test.go +++ b/orc8r/cloud/go/services/configurator/storage/sql_integ_test.go @@ -43,7 +43,7 @@ func TestSqlConfiguratorStorage_Integration(t *testing.T) { err = factory.InitializeServiceStorage() assert.NoError(t, err) - // Check the contract for an empty datastore + // Check the contract for an empty data store store, err := factory.StartTransaction(context.Background(), nil) assert.NoError(t, err) @@ -184,7 +184,7 @@ func TestSqlConfiguratorStorage_Integration(t *testing.T) { assert.NoError(t, store.Commit()) // ======================================================================== - // Empty datastore entity load tests + // Empty data store entity load tests // ======================================================================== store, err = factory.StartTransaction(context.Background(), nil) diff --git a/orc8r/cloud/go/services/device/client_test.go b/orc8r/cloud/go/services/device/client_api_test.go similarity index 100% rename from orc8r/cloud/go/services/device/client_test.go rename to orc8r/cloud/go/services/device/client_api_test.go diff --git a/orc8r/cloud/go/services/device/device/main.go b/orc8r/cloud/go/services/device/device/main.go index dcc74e4a131b..6434b04aba92 100644 --- 
a/orc8r/cloud/go/services/device/device/main.go +++ b/orc8r/cloud/go/services/device/device/main.go @@ -10,7 +10,6 @@ package main import ( "magma/orc8r/cloud/go/blobstore" - "magma/orc8r/cloud/go/datastore" "magma/orc8r/cloud/go/orc8r" "magma/orc8r/cloud/go/service" "magma/orc8r/cloud/go/services/device" @@ -26,7 +25,7 @@ func main() { if err != nil { glog.Fatalf("Error creating device service %s", err) } - db, err := sqorc.Open(datastore.SQL_DRIVER, datastore.DATABASE_SOURCE) + db, err := sqorc.Open(blobstore.SQLDriver, blobstore.DatabaseSource) if err != nil { glog.Fatalf("Failed to connect to database: %s", err) } diff --git a/orc8r/cloud/go/services/directoryd/client_api.go b/orc8r/cloud/go/services/directoryd/client_api.go index 0dabce7eb049..1b7646e448bc 100644 --- a/orc8r/cloud/go/services/directoryd/client_api.go +++ b/orc8r/cloud/go/services/directoryd/client_api.go @@ -6,8 +6,148 @@ This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. */ -// Package directoryd provides a client API for interacting with the -// directory service, which manages the UE location information package directoryd +import ( + "context" + "fmt" + + "magma/orc8r/cloud/go/orc8r" + "magma/orc8r/cloud/go/services/state" + merrors "magma/orc8r/lib/go/errors" + "magma/orc8r/lib/go/protos" + "magma/orc8r/lib/go/registry" + + "github.com/golang/glog" + "github.com/pkg/errors" +) + const ServiceName = "DIRECTORYD" + +//------------------------------- +// Directoryd service client APIs +//------------------------------- + +// getDirectorydClient returns an RPC connection to the directoryd service. 
+func getDirectorydClient() (protos.DirectoryLookupClient, error) { + conn, err := registry.GetConnection(ServiceName) + if err != nil { + initErr := merrors.NewInitError(err, ServiceName) + glog.Error(initErr) + return nil, initErr + } + return protos.NewDirectoryLookupClient(conn), err +} + +// GetHostnameForHWID returns the hostname mapped to by hardware ID. +// Derived state, stored in directoryd service. +func GetHostnameForHWID(hwid string) (string, error) { + client, err := getDirectorydClient() + if err != nil { + return "", errors.Wrap(err, "failed to get directoryd client") + } + + res, err := client.GetHostnameForHWID(context.Background(), &protos.GetHostnameForHWIDRequest{Hwid: hwid}) + if err != nil { + return "", fmt.Errorf("failed to get hostname for hwid %s: %s", hwid, err) + } + + return res.Hostname, nil +} + +// MapHWIDToHostname maps a single hwid to a hostname. +// Derived state, stored in directoryd service. +func MapHWIDToHostname(hwid, hostname string) error { + return MapHWIDsToHostnames(map[string]string{hwid: hostname}) +} + +// MapHWIDsToHostnames maps {hwid -> hostname}. +// Derived state, stored in directoryd service. +func MapHWIDsToHostnames(hwidToHostname map[string]string) error { + client, err := getDirectorydClient() + if err != nil { + return errors.Wrap(err, "failed to get directoryd client") + } + + _, err = client.MapHWIDsToHostnames(context.Background(), &protos.MapHWIDToHostnameRequest{HwidToHostname: hwidToHostname}) + if err != nil { + return fmt.Errorf("failed to map hwids to hostnames %v: %s", hwidToHostname, err) + } + + return nil +} + +// GetIMSIForSessionID returns the IMSI mapped to by session ID. +// Derived state, stored in directoryd service. 
+// NOTE: this mapping is provided on a best-effort basis, meaning +// - a {session ID -> IMSI} mapping may be missing even though the IMSI has a session ID record +// - a {session ID -> IMSI} mapping may be stale +func GetIMSIForSessionID(networkID, sessionID string) (string, error) { + client, err := getDirectorydClient() + if err != nil { + return "", errors.Wrap(err, "failed to get directoryd client") + } + + res, err := client.GetIMSIForSessionID(context.Background(), &protos.GetIMSIForSessionIDRequest{ + NetworkID: networkID, + SessionID: sessionID, + }) + if err != nil { + return "", fmt.Errorf("failed to get imsi for session ID %s under network ID %s: %s", sessionID, networkID, err) + } + + return res.Imsi, nil +} + +// MapSessionIDsToIMSIs maps {session ID -> IMSI}. +// Derived state, stored in directoryd service. +func MapSessionIDsToIMSIs(networkID string, sessionIDToIMSI map[string]string) error { + client, err := getDirectorydClient() + if err != nil { + return errors.Wrap(err, "failed to get directoryd client") + } + + _, err = client.MapSessionIDsToIMSIs(context.Background(), &protos.MapSessionIDToIMSIRequest{ + NetworkID: networkID, + SessionIDToIMSI: sessionIDToIMSI, + }) + if err != nil { + return fmt.Errorf("failed to map session IDs to IMSIs %v under network ID %s: %s", sessionIDToIMSI, networkID, err) + } + + return nil +} + +//-------------------------- +// State service client APIs +//-------------------------- + +// GetHWIDForIMSI returns the HWID mapped to by the IMSI. +// Primary state, stored in state service. 
+func GetHWIDForIMSI(networkID, imsi string) (string, error) { + st, err := state.GetState(networkID, orc8r.DirectoryRecordType, imsi) + if err != nil { + return "", err + } + record, ok := st.ReportedState.(*DirectoryRecord) + if !ok || len(record.LocationHistory) == 0 { + return "", fmt.Errorf("failed to convert reported state to DirectoryRecord for device id: %s", st.ReporterID) + } + return record.LocationHistory[0], nil +} + +// GetSessionIDForIMSI returns the session ID mapped to by the IMSI. +// Primary state, stored in state service. +func GetSessionIDForIMSI(networkID, imsi string) (string, error) { + st, err := state.GetState(networkID, orc8r.DirectoryRecordType, imsi) + if err != nil { + return "", err + } + + record, ok := st.ReportedState.(*DirectoryRecord) + if !ok { + return "", fmt.Errorf("failed to convert reported state to DirectoryRecord for device id: %s", st.ReporterID) + } + + return record.GetSessionID() +} diff --git a/orc8r/cloud/go/services/directoryd/client_api_test.go b/orc8r/cloud/go/services/directoryd/client_api_test.go new file mode 100644 index 000000000000..f8ee6844f6ce --- /dev/null +++ b/orc8r/cloud/go/services/directoryd/client_api_test.go @@ -0,0 +1,190 @@ +/* + Copyright (c) Facebook, Inc. and its affiliates. + All rights reserved. + + This source code is licensed under the BSD-style license found in the + LICENSE file in the root directory of this source tree. 
+*/ + +package directoryd_test + +import ( + "testing" + + "magma/orc8r/cloud/go/orc8r" + "magma/orc8r/cloud/go/pluginimpl/models" + "magma/orc8r/cloud/go/serde" + configuratorTestInit "magma/orc8r/cloud/go/services/configurator/test_init" + configuratorTestUtils "magma/orc8r/cloud/go/services/configurator/test_utils" + "magma/orc8r/cloud/go/services/device" + deviceTestInit "magma/orc8r/cloud/go/services/device/test_init" + "magma/orc8r/cloud/go/services/directoryd" + directorydTestInit "magma/orc8r/cloud/go/services/directoryd/test_init" + "magma/orc8r/cloud/go/services/state" + stateTestInit "magma/orc8r/cloud/go/services/state/test_init" + "magma/orc8r/cloud/go/services/state/test_utils" + "magma/orc8r/lib/go/protos" + "magma/orc8r/lib/go/registry" + + "github.com/stretchr/testify/assert" +) + +const ( + hn0 = "some_hostname_0" + hn1 = "some_hostname_1" + hwid0 = "some_hardware_id_0" + hwid1 = "some_hardware_id_1" + imsi0 = "some_imsi_0" + nid0 = "some_network_id_0" + sid0 = "some_session_id_0" + sidWithoutPrefix = "155129" + sidWithIMSIPrefix = "IMSI156304337849371-" + sidWithoutPrefix +) + +func TestGetSessionID(t *testing.T) { + record := &directoryd.DirectoryRecord{ + LocationHistory: []string{hwid0}, // imsi0->hwid0 + Identifiers: map[string]interface{}{ + directoryd.RecordKeySessionID: sid0, // imsi0->sid0 + }, + } + + // Default path + sid, err := record.GetSessionID() + assert.NoError(t, err) + assert.Equal(t, sid0, sid) + + // IMSI-prefixed session ID should remove prefix + record.Identifiers[directoryd.RecordKeySessionID] = sidWithIMSIPrefix + sid, err = record.GetSessionID() + assert.NoError(t, err) + assert.Equal(t, sidWithoutPrefix, sid) + + // Err on non-string sid + record.Identifiers[directoryd.RecordKeySessionID] = 42 + _, err = record.GetSessionID() + assert.Error(t, err) + + // Empty string on no sid + delete(record.Identifiers, directoryd.RecordKeySessionID) + sid, err = record.GetSessionID() + assert.NoError(t, err) + assert.Equal(t, "", 
sid) +} + +func TestDirectorydMethods(t *testing.T) { + directorydTestInit.StartTestService(t) + + // Empty initially + _, err := directoryd.GetSessionIDForIMSI(nid0, imsi0) + assert.Error(t, err) + _, err = directoryd.GetHostnameForHWID(hwid0) + assert.Error(t, err) + + // Put sid0->imsi0 + err = directoryd.MapSessionIDsToIMSIs(nid0, map[string]string{sid0: imsi0}) + assert.NoError(t, err) + + // Put Many hwid0->hn0 + err = directoryd.MapHWIDsToHostnames(map[string]string{hwid0: hn0}) + assert.NoError(t, err) + + // Put Single hwid1->hn1 + err = directoryd.MapHWIDToHostname(hwid1, hn1) + assert.NoError(t, err) + + // Get sid0->imsi0 + imsi, err := directoryd.GetIMSIForSessionID(nid0, sid0) + assert.NoError(t, err) + assert.Equal(t, imsi, imsi0) + + // Get hwid0->hn0 + hn, err := directoryd.GetHostnameForHWID(hwid0) + assert.NoError(t, err) + assert.Equal(t, hn0, hn) + + // Get hwid1->hn1 + hn, err = directoryd.GetHostnameForHWID(hwid1) + assert.NoError(t, err) + assert.Equal(t, hn1, hn) +} + +func TestDirectorydStateMethods(t *testing.T) { + configuratorTestInit.StartTestService(t) + deviceTestInit.StartTestService(t) + + directorydTestInit.StartTestService(t) + stateTestInit.StartTestService(t) + + stateClient, err := getStateServiceClient(t) + assert.NoError(t, err) + + err = serde.RegisterSerdes( + state.NewStateSerde(orc8r.DirectoryRecordType, &directoryd.DirectoryRecord{}), + serde.NewBinarySerde(device.SerdeDomain, orc8r.AccessGatewayRecordType, &models.GatewayDevice{}), + ) + assert.NoError(t, err) + + configuratorTestUtils.RegisterNetwork(t, nid0, "DirectoryD Service Test") + configuratorTestUtils.RegisterGateway(t, nid0, hwid0, &models.GatewayDevice{HardwareID: hwid0}) + ctx := test_utils.GetContextWithCertificate(t, hwid0) + + record := &directoryd.DirectoryRecord{ + LocationHistory: []string{hwid0}, // imsi0->hwid0 + Identifiers: map[string]interface{}{ + directoryd.RecordKeySessionID: sid0, // imsi0->sid0 + }, + } + serializedRecord, err := 
record.MarshalBinary() + assert.NoError(t, err) + + st := &protos.State{ + Type: orc8r.DirectoryRecordType, + DeviceID: imsi0, + Value: serializedRecord, + } + stateID := state.StateID{ + Type: st.Type, + DeviceID: st.DeviceID, + } + + // Empty initially + _, err = directoryd.GetHWIDForIMSI(nid0, imsi0) + assert.Error(t, err) + _, err = directoryd.GetSessionIDForIMSI(nid0, imsi0) + assert.Error(t, err) + + // Report state + reqReport := &protos.ReportStatesRequest{States: []*protos.State{st}} + res, err := stateClient.ReportStates(ctx, reqReport) + assert.NoError(t, err) + assert.Empty(t, res.UnreportedStates) + + // Get imsi0->hwid0 + hwid, err := directoryd.GetHWIDForIMSI(nid0, imsi0) + assert.NoError(t, err) + assert.Equal(t, hwid0, hwid) + + // Get imsi0->sid0 + sid, err := directoryd.GetSessionIDForIMSI(nid0, imsi0) + assert.NoError(t, err) + assert.Equal(t, sid0, sid) + + // Delete state + err = state.DeleteStates(nid0, []state.StateID{stateID}) + assert.NoError(t, err) + + // Get imsi0->hwid0, should be gone + hwid, err = directoryd.GetHWIDForIMSI(nid0, imsi0) + assert.Error(t, err) + + // Get imsi0->sid0, should be gone + sid, err = directoryd.GetSessionIDForIMSI(nid0, imsi0) + assert.Error(t, err) +} + +func getStateServiceClient(t *testing.T) (protos.StateServiceClient, error) { + conn, err := registry.GetConnection(state.ServiceName) + assert.NoError(t, err) + return protos.NewStateServiceClient(conn), err +} diff --git a/orc8r/cloud/go/services/directoryd/directoryd/main.go b/orc8r/cloud/go/services/directoryd/directoryd/main.go index d8a52b89cc3f..34fc97529e21 100644 --- a/orc8r/cloud/go/services/directoryd/directoryd/main.go +++ b/orc8r/cloud/go/services/directoryd/directoryd/main.go @@ -9,25 +9,48 @@ LICENSE file in the root directory of this source tree. 
package main import ( + "magma/orc8r/cloud/go/blobstore" "magma/orc8r/cloud/go/orc8r" "magma/orc8r/cloud/go/service" "magma/orc8r/cloud/go/services/directoryd" + "magma/orc8r/cloud/go/services/directoryd/servicers" + "magma/orc8r/cloud/go/services/directoryd/storage" + "magma/orc8r/cloud/go/sqorc" + "magma/orc8r/lib/go/protos" "github.com/golang/glog" ) -// NOTE: directoryd service currently attaches no handlers. -// The service is preserved for future plans related to custom indexers. func main() { - // Create Magma micro-service - directoryService, err := service.NewOrchestratorService(orc8r.ModuleName, directoryd.ServiceName) + // Create service + srv, err := service.NewOrchestratorService(orc8r.ModuleName, directoryd.ServiceName) if err != nil { glog.Fatalf("Error creating directory service: %s", err) } - // Run the service - glog.V(2).Info("Starting Directory Service...") - err = directoryService.Run() + // Init storage + db, err := sqorc.Open(blobstore.SQLDriver, blobstore.DatabaseSource) + if err != nil { + glog.Fatalf("Error opening db connection: %s", err) + } + + fact := blobstore.NewEntStorage(storage.DirectorydTableBlobstore, db, sqorc.GetSqlBuilder()) + err = fact.InitializeFactory() + if err != nil { + glog.Fatalf("Error initializing directory storage: %s", err) + } + + store := storage.NewDirectorydBlobstore(fact) + + // Add servicers + servicer, err := servicers.NewDirectoryLookupServicer(store) + if err != nil { + glog.Fatalf("Error creating initializing directory servicer: %s", err) + } + protos.RegisterDirectoryLookupServer(srv.GrpcServer, servicer) + + // Run service + err = srv.Run() if err != nil { glog.Fatalf("Error running directory service: %s", err) } diff --git a/orc8r/cloud/go/services/directoryd/doc.go b/orc8r/cloud/go/services/directoryd/doc.go new file mode 100644 index 000000000000..d1d387113f81 --- /dev/null +++ b/orc8r/cloud/go/services/directoryd/doc.go @@ -0,0 +1,20 @@ +/* + Copyright (c) Facebook, Inc. and its affiliates. 
+ All rights reserved. + + This source code is licensed under the BSD-style license found in the + LICENSE file in the root directory of this source tree. +*/ + +// Package directoryd provides an API for interacting with the +// directory lookup service, which manages UE location records. +// +// Primary state +// - reported directly from the relevant device/gateway +// - managed by the state service +// - versioned +// Secondary state +// - derived, in the controller, from the primary state or other information +// - managed by the directoryd service (DirectoryLookupServer) +// - non-versioned, with availability and correctness provided on a best-effort basis +package directoryd diff --git a/orc8r/cloud/go/services/directoryd/servicers/servicer.go b/orc8r/cloud/go/services/directoryd/servicers/servicer.go new file mode 100644 index 000000000000..9756bc7ffe4c --- /dev/null +++ b/orc8r/cloud/go/services/directoryd/servicers/servicer.go @@ -0,0 +1,77 @@ +/* + Copyright (c) Facebook, Inc. and its affiliates. + All rights reserved. + + This source code is licensed under the BSD-style license found in the + LICENSE file in the root directory of this source tree. 
+*/ + +package servicers + +import ( + "context" + + "magma/orc8r/cloud/go/services/directoryd/storage" + "magma/orc8r/lib/go/protos" + + "github.com/pkg/errors" +) + +type directoryLookupServicer struct { + store storage.DirectorydStorage +} + +func NewDirectoryLookupServicer(store storage.DirectorydStorage) (protos.DirectoryLookupServer, error) { + srv := &directoryLookupServicer{store: store} + return srv, nil +} + +func (d *directoryLookupServicer) GetHostnameForHWID( + ctx context.Context, req *protos.GetHostnameForHWIDRequest, +) (*protos.GetHostnameForHWIDResponse, error) { + err := req.Validate() + if err != nil { + return nil, errors.Wrap(err, "failed to validate request") + } + + hostname, err := d.store.GetHostnameForHWID(req.Hwid) + res := &protos.GetHostnameForHWIDResponse{Hostname: hostname} + + return res, err +} + +func (d *directoryLookupServicer) MapHWIDsToHostnames(ctx context.Context, req *protos.MapHWIDToHostnameRequest) (*protos.Void, error) { + err := req.Validate() + if err != nil { + return nil, errors.Wrap(err, "failed to validate request") + } + + err = d.store.MapHWIDsToHostnames(req.HwidToHostname) + + return &protos.Void{}, err +} + +func (d *directoryLookupServicer) GetIMSIForSessionID( + ctx context.Context, req *protos.GetIMSIForSessionIDRequest, +) (*protos.GetIMSIForSessionIDResponse, error) { + err := req.Validate() + if err != nil { + return nil, errors.Wrap(err, "failed to validate request") + } + + imsi, err := d.store.GetIMSIForSessionID(req.NetworkID, req.SessionID) + res := &protos.GetIMSIForSessionIDResponse{Imsi: imsi} + + return res, err +} + +func (d *directoryLookupServicer) MapSessionIDsToIMSIs(ctx context.Context, req *protos.MapSessionIDToIMSIRequest) (*protos.Void, error) { + err := req.Validate() + if err != nil { + return nil, errors.Wrap(err, "failed to validate request") + } + + err = d.store.MapSessionIDsToIMSIs(req.NetworkID, req.SessionIDToIMSI) + + return &protos.Void{}, err +} diff --git 
a/orc8r/cloud/go/services/directoryd/servicers/servicer_test.go b/orc8r/cloud/go/services/directoryd/servicers/servicer_test.go new file mode 100644 index 000000000000..3b9a32598514 --- /dev/null +++ b/orc8r/cloud/go/services/directoryd/servicers/servicer_test.go @@ -0,0 +1,157 @@ +/* + Copyright (c) Facebook, Inc. and its affiliates. + All rights reserved. + + This source code is licensed under the BSD-style license found in the + LICENSE file in the root directory of this source tree. +*/ + +package servicers_test + +import ( + "testing" + + "magma/orc8r/cloud/go/blobstore" + "magma/orc8r/cloud/go/services/directoryd/servicers" + "magma/orc8r/cloud/go/services/directoryd/storage" + stateTestInit "magma/orc8r/cloud/go/services/state/test_init" + "magma/orc8r/cloud/go/sqorc" + "magma/orc8r/lib/go/protos" + + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +const ( + hn0 = "some_hostname_0" + hn1 = "some_hostname_1" + hn2 = "some_hostname_2" + hwid0 = "some_hardwareid_0" + hwid1 = "some_hardwareid_1" + hwid2 = "some_hardwareid_2" + imsi0 = "some_imsi_0" + imsi1 = "some_imsi_1" + imsi2 = "some_imsi_2" + nid0 = "some_networkid_0" + nid1 = "some_networkid_1" + sid0 = "some_sessionid_0" + sid1 = "some_sessionid_1" + sid2 = "some_sessionid_2" +) + +func newTestDirectoryLookupServicer(t *testing.T) protos.DirectoryLookupServer { + db, err := sqorc.Open("sqlite3", ":memory:") + assert.NoError(t, err) + + fact := blobstore.NewEntStorage(storage.DirectorydTableBlobstore, db, sqorc.GetSqlBuilder()) + err = fact.InitializeFactory() + assert.NoError(t, err) + + store := storage.NewDirectorydBlobstore(fact) + srv, err := servicers.NewDirectoryLookupServicer(store) + assert.NoError(t, err) + + return srv +} + +func TestDirectoryLookupServicer_HostnameToHWID(t *testing.T) { + srv := newTestDirectoryLookupServicer(t) + stateTestInit.StartTestService(t) + ctx := context.Background() + + // Empty initially + get := &protos.GetHostnameForHWIDRequest{Hwid: hwid0} 
+ _, err := srv.GetHostnameForHWID(ctx, get) + assert.Error(t, err) + + // Put and get hwid0->hostname0 + put := &protos.MapHWIDToHostnameRequest{HwidToHostname: map[string]string{hwid0: hn0}} + _, err = srv.MapHWIDsToHostnames(ctx, put) + assert.NoError(t, err) + get = &protos.GetHostnameForHWIDRequest{Hwid: hwid0} + res, err := srv.GetHostnameForHWID(ctx, get) + assert.NoError(t, err) + assert.Equal(t, hn0, res.Hostname) + + // Put and get hwid1->hostname1, hwid2->hostname2 + put = &protos.MapHWIDToHostnameRequest{HwidToHostname: map[string]string{hwid1: hn1, hwid2: hn2}} + _, err = srv.MapHWIDsToHostnames(ctx, put) + assert.NoError(t, err) + get = &protos.GetHostnameForHWIDRequest{Hwid: hwid1} + res, err = srv.GetHostnameForHWID(ctx, get) + assert.NoError(t, err) + assert.Equal(t, hn1, res.Hostname) + get = &protos.GetHostnameForHWIDRequest{Hwid: hwid2} + res, err = srv.GetHostnameForHWID(ctx, get) + assert.NoError(t, err) + assert.Equal(t, hn2, res.Hostname) + + // hwid0->hostname0 still intact + get = &protos.GetHostnameForHWIDRequest{Hwid: hwid0} + res, err = srv.GetHostnameForHWID(ctx, get) + assert.NoError(t, err) + assert.Equal(t, hn0, res.Hostname) +} + +func TestDirectoryLookupServicer_SessionIDToIMSI(t *testing.T) { + srv := newTestDirectoryLookupServicer(t) + stateTestInit.StartTestService(t) + ctx := context.Background() + + // Empty initially + get := &protos.GetIMSIForSessionIDRequest{NetworkID: nid0, SessionID: sid0} + _, err := srv.GetIMSIForSessionID(ctx, get) + assert.Error(t, err) + + // Put and get sid0->imsi0 + put := &protos.MapSessionIDToIMSIRequest{NetworkID: nid0, SessionIDToIMSI: map[string]string{sid0: imsi0}} + _, err = srv.MapSessionIDsToIMSIs(ctx, put) + assert.NoError(t, err) + get = &protos.GetIMSIForSessionIDRequest{NetworkID: nid0, SessionID: sid0} + res, err := srv.GetIMSIForSessionID(ctx, get) + assert.NoError(t, err) + assert.Equal(t, imsi0, res.Imsi) + + // Put and get sid1->imsi1, sid2->imsi2 + put = 
&protos.MapSessionIDToIMSIRequest{NetworkID: nid0, SessionIDToIMSI: map[string]string{sid1: imsi1, sid2: imsi2}} + _, err = srv.MapSessionIDsToIMSIs(ctx, put) + assert.NoError(t, err) + get = &protos.GetIMSIForSessionIDRequest{NetworkID: nid0, SessionID: sid1} + res, err = srv.GetIMSIForSessionID(ctx, get) + assert.NoError(t, err) + assert.Equal(t, imsi1, res.Imsi) + get = &protos.GetIMSIForSessionIDRequest{NetworkID: nid0, SessionID: sid2} + res, err = srv.GetIMSIForSessionID(ctx, get) + assert.NoError(t, err) + assert.Equal(t, imsi2, res.Imsi) + + // sid0->imsi0 still intact + get = &protos.GetIMSIForSessionIDRequest{NetworkID: nid0, SessionID: sid0} + res, err = srv.GetIMSIForSessionID(ctx, get) + assert.NoError(t, err) + assert.Equal(t, imsi0, res.Imsi) + + // Correctly network-partitioned: {nid0: sid0->imsi0, nid1: sid0->imsi1} + put = &protos.MapSessionIDToIMSIRequest{NetworkID: nid0, SessionIDToIMSI: map[string]string{sid0: imsi0}} + _, err = srv.MapSessionIDsToIMSIs(ctx, put) + assert.NoError(t, err) + put = &protos.MapSessionIDToIMSIRequest{NetworkID: nid1, SessionIDToIMSI: map[string]string{sid0: imsi1}} + _, err = srv.MapSessionIDsToIMSIs(ctx, put) + assert.NoError(t, err) + get = &protos.GetIMSIForSessionIDRequest{NetworkID: nid0, SessionID: sid0} + res, err = srv.GetIMSIForSessionID(ctx, get) + assert.NoError(t, err) + assert.Equal(t, imsi0, res.Imsi) + get = &protos.GetIMSIForSessionIDRequest{NetworkID: nid1, SessionID: sid0} + res, err = srv.GetIMSIForSessionID(ctx, get) + assert.NoError(t, err) + assert.Equal(t, imsi1, res.Imsi) + + // Fail with empty network ID + get = &protos.GetIMSIForSessionIDRequest{SessionID: sid0} + _, err = srv.GetIMSIForSessionID(ctx, get) + assert.Error(t, err) + put = &protos.MapSessionIDToIMSIRequest{SessionIDToIMSI: map[string]string{sid0: imsi0}} + _, err = srv.MapSessionIDsToIMSIs(ctx, put) + assert.Error(t, err) +} diff --git a/orc8r/cloud/go/services/directoryd/storage/storage.go 
b/orc8r/cloud/go/services/directoryd/storage/storage.go index eacef2574d4c..007092ef45a1 100644 --- a/orc8r/cloud/go/services/directoryd/storage/storage.go +++ b/orc8r/cloud/go/services/directoryd/storage/storage.go @@ -11,9 +11,15 @@ package storage // DirectorydStorage is the persistence service interface for location records. // All Directoryd data accesses from directoryd service must go through this interface. type DirectorydStorage interface { - // GetHostname gets the hostname mapped to by hwid. - GetHostname(hwid string) (string, error) + // GetHostnameForHWID returns the hostname mapped to by hardware ID. + GetHostnameForHWID(hwid string) (string, error) - // PutHostname maps hwid to hostname. - PutHostname(hwid, hostname string) error + // MapHWIDsToHostnames maps {hwid -> hostname}. + MapHWIDsToHostnames(hwidToHostname map[string]string) error + + // GetIMSIForSessionID returns the IMSI mapped to by session ID. + GetIMSIForSessionID(networkID, sessionID string) (string, error) + + // MapSessionIDsToIMSIs maps {session ID -> IMSI}. + MapSessionIDsToIMSIs(networkID string, sessionIDToIMSI map[string]string) error } diff --git a/orc8r/cloud/go/services/directoryd/storage/storage_blobstore.go b/orc8r/cloud/go/services/directoryd/storage/storage_blobstore.go index 36bec7d28eb8..1fba64a7f88b 100644 --- a/orc8r/cloud/go/services/directoryd/storage/storage_blobstore.go +++ b/orc8r/cloud/go/services/directoryd/storage/storage_blobstore.go @@ -9,6 +9,8 @@ LICENSE file in the root directory of this source tree. package storage import ( + "sort" + "magma/orc8r/cloud/go/blobstore" "magma/orc8r/cloud/go/storage" merrors "magma/orc8r/lib/go/errors" @@ -17,21 +19,21 @@ import ( ) const ( - // DirectorydTableBlobstore is the table where blobstore stores directoryd's hwid_to_hostname data. + // DirectorydTableBlobstore is the table where blobstore stores directoryd's data. 
DirectorydTableBlobstore = "directoryd_blobstore" - // DirectorydDefaultType is the default type field for blobstore storage. - DirectorydDefaultType = "hwid_to_hostname" + // DirectorydTypeHWIDToHostname is the blobstore type field for the hardware ID to hostname mapping. + DirectorydTypeHWIDToHostname = "hwid_to_hostname" + + // DirectorydTypeSessionIDToIMSI is the blobstore type field for the session ID to IMSI mapping. + DirectorydTypeSessionIDToIMSI = "sessionid_to_imsi" - // Blobstore needs a network ID, but directoryd is network-agnostic so we - // use a placeholder value. + // Blobstore needs a network ID, so for network-agnostic types we use a placeholder value. placeholderNetworkID = "placeholder_network" ) // NewDirectorydBlobstore returns a directoryd storage implementation // backed by the provided blobstore factory. -// NOTE: the datastore impl uses tableID as the table name, while here the -// blobstore impl uses tableID as the type field within a single table. func NewDirectorydBlobstore(factory blobstore.BlobStorageFactory) DirectorydStorage { return &directorydBlobstore{factory: factory} } @@ -40,7 +42,7 @@ type directorydBlobstore struct { factory blobstore.BlobStorageFactory } -func (d *directorydBlobstore) GetHostname(hwid string) (string, error) { +func (d *directorydBlobstore) GetHostnameForHWID(hwid string) (string, error) { store, err := d.factory.StartTransaction(&storage.TxOptions{ReadOnly: true}) if err != nil { return "", errors.Wrap(err, "failed to start transaction") @@ -49,7 +51,7 @@ func (d *directorydBlobstore) GetHostname(hwid string) (string, error) { blob, err := store.Get( placeholderNetworkID, - storage.TypeAndKey{Type: DirectorydDefaultType, Key: hwid}, + storage.TypeAndKey{Type: DirectorydTypeHWIDToHostname, Key: hwid}, ) if err == merrors.ErrNotFound { return "", err @@ -62,17 +64,67 @@ func (d *directorydBlobstore) GetHostname(hwid string) (string, error) { return hostname, store.Commit() } -func (d *directorydBlobstore) 
PutHostname(hwid, hostname string) error { +func (d *directorydBlobstore) MapHWIDsToHostnames(hwidToHostname map[string]string) error { + store, err := d.factory.StartTransaction(&storage.TxOptions{}) + if err != nil { + return errors.Wrap(err, "failed to start transaction") + } + defer store.Rollback() + + blobs := convertKVToBlobs(DirectorydTypeHWIDToHostname, hwidToHostname) + err = store.CreateOrUpdate(placeholderNetworkID, blobs) + if err != nil { + return errors.Wrap(err, "failed to create or update HWID to hostname mapping") + } + return store.Commit() +} + +func (d *directorydBlobstore) GetIMSIForSessionID(networkID, sessionID string) (string, error) { + store, err := d.factory.StartTransaction(&storage.TxOptions{ReadOnly: true}) + if err != nil { + return "", errors.Wrap(err, "failed to start transaction") + } + defer store.Rollback() + + blob, err := store.Get( + networkID, + storage.TypeAndKey{Type: DirectorydTypeSessionIDToIMSI, Key: sessionID}, + ) + if err == merrors.ErrNotFound { + return "", err + } + if err != nil { + return "", errors.Wrap(err, "failed to get IMSI") + } + + imsi := string(blob.Value) + return imsi, store.Commit() +} + +func (d *directorydBlobstore) MapSessionIDsToIMSIs(networkID string, sessionIDToIMSI map[string]string) error { store, err := d.factory.StartTransaction(&storage.TxOptions{}) if err != nil { return errors.Wrap(err, "failed to start transaction") } defer store.Rollback() - blob := blobstore.Blob{Type: DirectorydDefaultType, Key: hwid, Value: []byte(hostname)} - err = store.CreateOrUpdate(placeholderNetworkID, []blobstore.Blob{blob}) + blobs := convertKVToBlobs(DirectorydTypeSessionIDToIMSI, sessionIDToIMSI) + err = store.CreateOrUpdate(networkID, blobs) if err != nil { - return errors.Wrap(err, "failed to create or update location record") + return errors.Wrap(err, "failed to create or update session ID to IMSI mapping") } return store.Commit() } + +// convertKVToBlobs deterministically converts a string-string map 
to blobstore blobs. +func convertKVToBlobs(typ string, kv map[string]string) []blobstore.Blob { + var blobs []blobstore.Blob + for k, v := range kv { + blobs = append(blobs, blobstore.Blob{Type: typ, Key: k, Value: []byte(v)}) + } + + // Sort by key for deterministic behavior in tests + sort.Slice(blobs, func(i, j int) bool { return blobs[i].Key < blobs[j].Key }) + + return blobs +} diff --git a/orc8r/cloud/go/services/directoryd/storage/storage_blobstore_test.go b/orc8r/cloud/go/services/directoryd/storage/storage_blobstore_test.go index c61b54ed2ff0..ee135196c8fb 100644 --- a/orc8r/cloud/go/services/directoryd/storage/storage_blobstore_test.go +++ b/orc8r/cloud/go/services/directoryd/storage/storage_blobstore_test.go @@ -23,23 +23,22 @@ import ( ) const ( - networkID = "placeholder_network" + placeholderNetworkID = "placeholder_network" ) -func TestDirectorydBlobstoreStorage_GetHostname(t *testing.T) { +func TestDirectorydBlobstoreStorage_GetHostnameForHWID(t *testing.T) { var blobFactMock *mocks.BlobStorageFactory var blobStoreMock *mocks.TransactionalBlobStorage someErr := errors.New("generic error") hwid := "some_hwid" - tk := storage.TypeAndKey{Type: dstorage.DirectorydDefaultType, Key: hwid} + tk := storage.TypeAndKey{Type: dstorage.DirectorydTypeHWIDToHostname, Key: hwid} hostname := "some_hostname" blob := blobstore.Blob{ - Type: tk.Type, - Key: tk.Key, - Value: []byte(hostname), - Version: 0, + Type: tk.Type, + Key: tk.Key, + Value: []byte(hostname), } // Fail to start transaction @@ -48,7 +47,7 @@ func TestDirectorydBlobstoreStorage_GetHostname(t *testing.T) { blobFactMock.On("StartTransaction", mock.Anything).Return(nil, someErr).Once() store := dstorage.NewDirectorydBlobstore(blobFactMock) - _, err := store.GetHostname(hwid) + _, err := store.GetHostnameForHWID(hwid) assert.Error(t, err) blobFactMock.AssertExpectations(t) blobStoreMock.AssertExpectations(t) @@ -58,10 +57,10 @@ func TestDirectorydBlobstoreStorage_GetHostname(t *testing.T) { 
blobStoreMock = &mocks.TransactionalBlobStorage{} blobFactMock.On("StartTransaction", mock.Anything).Return(blobStoreMock, nil).Once() blobStoreMock.On("Rollback").Return(nil).Once() - blobStoreMock.On("Get", networkID, tk).Return(blobstore.Blob{}, merrors.ErrNotFound).Once() + blobStoreMock.On("Get", placeholderNetworkID, tk).Return(blobstore.Blob{}, merrors.ErrNotFound).Once() store = dstorage.NewDirectorydBlobstore(blobFactMock) - _, err = store.GetHostname(hwid) + _, err = store.GetHostnameForHWID(hwid) assert.Exactly(t, merrors.ErrNotFound, err) blobFactMock.AssertExpectations(t) blobStoreMock.AssertExpectations(t) @@ -71,10 +70,10 @@ func TestDirectorydBlobstoreStorage_GetHostname(t *testing.T) { blobStoreMock = &mocks.TransactionalBlobStorage{} blobFactMock.On("StartTransaction", mock.Anything).Return(blobStoreMock, nil).Once() blobStoreMock.On("Rollback").Return(nil).Once() - blobStoreMock.On("Get", networkID, tk).Return(blobstore.Blob{}, someErr).Once() + blobStoreMock.On("Get", placeholderNetworkID, tk).Return(blobstore.Blob{}, someErr).Once() store = dstorage.NewDirectorydBlobstore(blobFactMock) - _, err = store.GetHostname(hwid) + _, err = store.GetHostnameForHWID(hwid) assert.Error(t, err) assert.NotEqual(t, merrors.ErrNotFound, err) blobFactMock.AssertExpectations(t) @@ -85,30 +84,187 @@ func TestDirectorydBlobstoreStorage_GetHostname(t *testing.T) { blobStoreMock = &mocks.TransactionalBlobStorage{} blobFactMock.On("StartTransaction", mock.Anything).Return(blobStoreMock, nil).Once() blobStoreMock.On("Rollback").Return(nil).Once() - blobStoreMock.On("Get", networkID, tk).Return(blob, nil).Once() + blobStoreMock.On("Get", placeholderNetworkID, tk).Return(blob, nil).Once() blobStoreMock.On("Commit").Return(nil).Once() store = dstorage.NewDirectorydBlobstore(blobFactMock) - hostnameRecvd, err := store.GetHostname(hwid) + hostnameRecvd, err := store.GetHostnameForHWID(hwid) assert.Equal(t, hostname, hostnameRecvd) blobFactMock.AssertExpectations(t) 
blobStoreMock.AssertExpectations(t) } -func TestDirectorydBlobstoreStorage_PutRecord(t *testing.T) { +func TestDirectorydBlobstoreStorage_MapHWIDToHostname(t *testing.T) { var blobFactMock *mocks.BlobStorageFactory var blobStoreMock *mocks.TransactionalBlobStorage someErr := errors.New("generic error") - hwid := "some_hwid" - tk := storage.TypeAndKey{Type: dstorage.DirectorydDefaultType, Key: hwid} + hwids := []string{"some_hwid_0", "some_hwid_1"} + hostnames := []string{"some_hostname_0", "some_hostname_1"} + hwidToHostname := map[string]string{ + hwids[0]: hostnames[0], + hwids[1]: hostnames[1], + } - hostname := "some_hostname" + tks := []storage.TypeAndKey{ + {Type: dstorage.DirectorydTypeHWIDToHostname, Key: hwids[0]}, + {Type: dstorage.DirectorydTypeHWIDToHostname, Key: hwids[1]}, + } + + blobs := []blobstore.Blob{ + { + Type: tks[0].Type, + Key: tks[0].Key, + Value: []byte(hostnames[0]), + }, + { + Type: tks[1].Type, + Key: tks[1].Key, + Value: []byte(hostnames[1]), + }, + } + + // Fail to start transaction + blobFactMock = &mocks.BlobStorageFactory{} + blobStoreMock = &mocks.TransactionalBlobStorage{} + blobFactMock.On("StartTransaction", mock.Anything).Return(nil, someErr).Once() + store := dstorage.NewDirectorydBlobstore(blobFactMock) + + err := store.MapHWIDsToHostnames(hwidToHostname) + assert.Error(t, err) + blobFactMock.AssertExpectations(t) + blobStoreMock.AssertExpectations(t) + + // store.PutRecord fails + blobFactMock = &mocks.BlobStorageFactory{} + blobStoreMock = &mocks.TransactionalBlobStorage{} + blobFactMock.On("StartTransaction", mock.Anything).Return(blobStoreMock, nil).Once() + blobStoreMock.On("Rollback").Return(nil).Once() + blobStoreMock.On("CreateOrUpdate", placeholderNetworkID, mock.Anything, mock.Anything). 
+ Return(someErr).Once() + store = dstorage.NewDirectorydBlobstore(blobFactMock) + + err = store.MapHWIDsToHostnames(hwidToHostname) + assert.Error(t, err) + blobFactMock.AssertExpectations(t) + blobStoreMock.AssertExpectations(t) + + // Success + blobFactMock = &mocks.BlobStorageFactory{} + blobStoreMock = &mocks.TransactionalBlobStorage{} + blobFactMock.On("StartTransaction", mock.Anything).Return(blobStoreMock, nil).Once() + blobStoreMock.On("Rollback").Return(nil).Once() + blobStoreMock.On("CreateOrUpdate", placeholderNetworkID, blobs). + Return(nil).Once() + blobStoreMock.On("Commit").Return(nil).Once() + store = dstorage.NewDirectorydBlobstore(blobFactMock) + + err = store.MapHWIDsToHostnames(hwidToHostname) + assert.NoError(t, err) + blobFactMock.AssertExpectations(t) + blobStoreMock.AssertExpectations(t) +} + +func TestDirectorydBlobstore_GetIMSIForSessionID(t *testing.T) { + var blobFactMock *mocks.BlobStorageFactory + var blobStoreMock *mocks.TransactionalBlobStorage + someErr := errors.New("generic error") + + nid := "some_networkid" + + sid := "some_sessionid" + tk := storage.TypeAndKey{Type: dstorage.DirectorydTypeSessionIDToIMSI, Key: sid} + + imsi := "some_imsi" blob := blobstore.Blob{ - Type: tk.Type, - Key: tk.Key, - Value: []byte(hostname), - Version: 0, + Type: tk.Type, + Key: tk.Key, + Value: []byte(imsi), + } + + // Fail to start transaction + blobFactMock = &mocks.BlobStorageFactory{} + blobStoreMock = &mocks.TransactionalBlobStorage{} + blobFactMock.On("StartTransaction", mock.Anything).Return(nil, someErr).Once() + store := dstorage.NewDirectorydBlobstore(blobFactMock) + + _, err := store.GetIMSIForSessionID(nid, sid) + assert.Error(t, err) + blobFactMock.AssertExpectations(t) + blobStoreMock.AssertExpectations(t) + + // store.Get fails with ErrNotFound + blobFactMock = &mocks.BlobStorageFactory{} + blobStoreMock = &mocks.TransactionalBlobStorage{} + blobFactMock.On("StartTransaction", mock.Anything).Return(blobStoreMock, nil).Once() + 
blobStoreMock.On("Rollback").Return(nil).Once() + blobStoreMock.On("Get", nid, tk).Return(blobstore.Blob{}, merrors.ErrNotFound).Once() + store = dstorage.NewDirectorydBlobstore(blobFactMock) + + _, err = store.GetIMSIForSessionID(nid, sid) + assert.Exactly(t, merrors.ErrNotFound, err) + blobFactMock.AssertExpectations(t) + blobStoreMock.AssertExpectations(t) + + // store.Get fails with error other than ErrNotFound + blobFactMock = &mocks.BlobStorageFactory{} + blobStoreMock = &mocks.TransactionalBlobStorage{} + blobFactMock.On("StartTransaction", mock.Anything).Return(blobStoreMock, nil).Once() + blobStoreMock.On("Rollback").Return(nil).Once() + blobStoreMock.On("Get", nid, tk).Return(blobstore.Blob{}, someErr).Once() + store = dstorage.NewDirectorydBlobstore(blobFactMock) + + _, err = store.GetIMSIForSessionID(nid, sid) + assert.Error(t, err) + assert.NotEqual(t, merrors.ErrNotFound, err) + blobFactMock.AssertExpectations(t) + blobStoreMock.AssertExpectations(t) + + // Success + blobFactMock = &mocks.BlobStorageFactory{} + blobStoreMock = &mocks.TransactionalBlobStorage{} + blobFactMock.On("StartTransaction", mock.Anything).Return(blobStoreMock, nil).Once() + blobStoreMock.On("Rollback").Return(nil).Once() + blobStoreMock.On("Get", nid, tk).Return(blob, nil).Once() + blobStoreMock.On("Commit").Return(nil).Once() + store = dstorage.NewDirectorydBlobstore(blobFactMock) + + imsiRecvd, err := store.GetIMSIForSessionID(nid, sid) + assert.Equal(t, imsi, imsiRecvd) + blobFactMock.AssertExpectations(t) + blobStoreMock.AssertExpectations(t) +} + +func TestDirectorydBlobstore_MapSessionIDToIMSI(t *testing.T) { + var blobFactMock *mocks.BlobStorageFactory + var blobStoreMock *mocks.TransactionalBlobStorage + someErr := errors.New("generic error") + + nid := "some_networkid" + + sids := []string{"some_sessionid_0", "some_sessionid_1"} + imsis := []string{"some_imsi_0", "some_imsi_1"} + sidToIMSI := map[string]string{ + sids[0]: imsis[0], + sids[1]: imsis[1], + } + + tks := 
[]storage.TypeAndKey{ + {Type: dstorage.DirectorydTypeSessionIDToIMSI, Key: sids[0]}, + {Type: dstorage.DirectorydTypeSessionIDToIMSI, Key: sids[1]}, + } + + blobs := []blobstore.Blob{ + { + Type: tks[0].Type, + Key: tks[0].Key, + Value: []byte(imsis[0]), + }, + { + Type: tks[1].Type, + Key: tks[1].Key, + Value: []byte(imsis[1]), + }, } // Fail to start transaction @@ -117,7 +273,7 @@ func TestDirectorydBlobstoreStorage_PutRecord(t *testing.T) { blobFactMock.On("StartTransaction", mock.Anything).Return(nil, someErr).Once() store := dstorage.NewDirectorydBlobstore(blobFactMock) - err := store.PutHostname(hwid, hostname) + err := store.MapSessionIDsToIMSIs(nid, sidToIMSI) assert.Error(t, err) blobFactMock.AssertExpectations(t) blobStoreMock.AssertExpectations(t) @@ -127,11 +283,11 @@ func TestDirectorydBlobstoreStorage_PutRecord(t *testing.T) { blobStoreMock = &mocks.TransactionalBlobStorage{} blobFactMock.On("StartTransaction", mock.Anything).Return(blobStoreMock, nil).Once() blobStoreMock.On("Rollback").Return(nil).Once() - blobStoreMock.On("CreateOrUpdate", networkID, mock.Anything, mock.Anything). + blobStoreMock.On("CreateOrUpdate", nid, mock.Anything, mock.Anything). Return(someErr).Once() store = dstorage.NewDirectorydBlobstore(blobFactMock) - err = store.PutHostname(hwid, hostname) + err = store.MapSessionIDsToIMSIs(nid, sidToIMSI) assert.Error(t, err) blobFactMock.AssertExpectations(t) blobStoreMock.AssertExpectations(t) @@ -141,12 +297,12 @@ func TestDirectorydBlobstoreStorage_PutRecord(t *testing.T) { blobStoreMock = &mocks.TransactionalBlobStorage{} blobFactMock.On("StartTransaction", mock.Anything).Return(blobStoreMock, nil).Once() blobStoreMock.On("Rollback").Return(nil).Once() - blobStoreMock.On("CreateOrUpdate", networkID, []blobstore.Blob{blob}). + blobStoreMock.On("CreateOrUpdate", nid, blobs). 
Return(nil).Once() blobStoreMock.On("Commit").Return(nil).Once() store = dstorage.NewDirectorydBlobstore(blobFactMock) - err = store.PutHostname(hwid, hostname) + err = store.MapSessionIDsToIMSIs(nid, sidToIMSI) assert.NoError(t, err) blobFactMock.AssertExpectations(t) blobStoreMock.AssertExpectations(t) diff --git a/orc8r/cloud/go/services/directoryd/storage/storage_integ_test.go b/orc8r/cloud/go/services/directoryd/storage/storage_integ_test.go index 8a3fb302426d..7d06c85c1045 100644 --- a/orc8r/cloud/go/services/directoryd/storage/storage_integ_test.go +++ b/orc8r/cloud/go/services/directoryd/storage/storage_integ_test.go @@ -33,36 +33,124 @@ func TestDirectorydStorageBlobstore_Integation(t *testing.T) { func testDirectorydStorageImpl(t *testing.T, store storage.DirectorydStorage) { hwid0 := "some_hwid_0" hwid1 := "some_hwid_1" + hwid2 := "some_hwid_2" + hwid3 := "some_hwid_3" hostname0 := "some_hostname_0" hostname1 := "some_hostname_1" + hostname2 := "some_hostname_2" + hostname3 := "some_hostname_3" + + nid0 := "some_networkid_0" + nid1 := "some_networkid_1" + sid0 := "some_sessionid_0" + sid1 := "some_sessionid_1" + imsi0 := "some_imsi_0" + imsi1 := "some_imsi_1" + + ////////////////////////////// + // Hostname -> HWID + ////////////////////////////// // Empty initially - _, err := store.GetHostname(hwid0) + _, err := store.GetHostnameForHWID(hwid0) assert.Exactly(t, err, merrors.ErrNotFound) - _, err = store.GetHostname(hwid1) + _, err = store.GetHostnameForHWID(hwid1) assert.Exactly(t, err, merrors.ErrNotFound) // Put and Get hwid0->hostname1 - err = store.PutHostname(hwid0, hostname1) + err = store.MapHWIDsToHostnames(map[string]string{hwid0: hostname1}) assert.NoError(t, err) - recvd, err := store.GetHostname(hwid0) + recvd, err := store.GetHostnameForHWID(hwid0) assert.Equal(t, hostname1, recvd) - _, err = store.GetHostname(hwid1) + _, err = store.GetHostnameForHWID(hwid1) assert.Exactly(t, err, merrors.ErrNotFound) // Put and Get hwid0->hostname0 - err 
= store.PutHostname(hwid0, hostname0) + err = store.MapHWIDsToHostnames(map[string]string{hwid0: hostname0}) + assert.NoError(t, err) + recvd, err = store.GetHostnameForHWID(hwid0) assert.NoError(t, err) - recvd, err = store.GetHostname(hwid0) assert.Equal(t, hostname0, recvd) - _, err = store.GetHostname(hwid1) + _, err = store.GetHostnameForHWID(hwid1) assert.Exactly(t, err, merrors.ErrNotFound) // Put and Get hwid1->hostname1 - err = store.PutHostname(hwid1, hostname1) + err = store.MapHWIDsToHostnames(map[string]string{hwid1: hostname1}) + assert.NoError(t, err) + recvd, err = store.GetHostnameForHWID(hwid0) assert.NoError(t, err) - recvd, err = store.GetHostname(hwid0) assert.Equal(t, hostname0, recvd) - recvd, err = store.GetHostname(hwid1) + recvd, err = store.GetHostnameForHWID(hwid1) + assert.NoError(t, err) assert.Equal(t, hostname1, recvd) + + // Multi-put: Put and Get hwid2->hostname2, hwid3->hostname3 + err = store.MapHWIDsToHostnames(map[string]string{hwid2: hostname2, hwid3: hostname3}) + assert.NoError(t, err) + recvd, err = store.GetHostnameForHWID(hwid2) + assert.NoError(t, err) + assert.Equal(t, hostname2, recvd) + recvd, err = store.GetHostnameForHWID(hwid3) + assert.NoError(t, err) + assert.Equal(t, hostname3, recvd) + + ////////////////////////////// + // Session ID -> IMSI + ////////////////////////////// + + // Empty initially + _, err = store.GetIMSIForSessionID(nid0, sid0) + assert.Exactly(t, err, merrors.ErrNotFound) + _, err = store.GetIMSIForSessionID(nid0, sid1) + assert.Exactly(t, err, merrors.ErrNotFound) + + // Put and Get sid0->imsi1 + err = store.MapSessionIDsToIMSIs(nid0, map[string]string{sid0: imsi1}) + assert.NoError(t, err) + recvd, err = store.GetIMSIForSessionID(nid0, sid0) + assert.NoError(t, err) + assert.Equal(t, imsi1, recvd) + _, err = store.GetIMSIForSessionID(nid0, sid1) + assert.Exactly(t, err, merrors.ErrNotFound) + + // Put and Get sid0->imsi0 + err = store.MapSessionIDsToIMSIs(nid0, map[string]string{sid0: 
imsi0}) + assert.NoError(t, err) + recvd, err = store.GetIMSIForSessionID(nid0, sid0) + assert.NoError(t, err) + assert.Equal(t, imsi0, recvd) + _, err = store.GetIMSIForSessionID(nid0, sid1) + assert.Exactly(t, err, merrors.ErrNotFound) + + // Put and Get sid1->imsi1 + err = store.MapSessionIDsToIMSIs(nid0, map[string]string{sid1: imsi1}) + assert.NoError(t, err) + recvd, err = store.GetIMSIForSessionID(nid0, sid0) + assert.NoError(t, err) + assert.Equal(t, imsi0, recvd) + recvd, err = store.GetIMSIForSessionID(nid0, sid1) + assert.NoError(t, err) + assert.Equal(t, imsi1, recvd) + + // Multi-put: Put and Get sid0->imsi0, sid1->imsi1 for nid1 + err = store.MapSessionIDsToIMSIs(nid1, map[string]string{sid0: imsi0, sid1: imsi1}) + assert.NoError(t, err) + recvd, err = store.GetIMSIForSessionID(nid1, sid0) + assert.NoError(t, err) + assert.Equal(t, imsi0, recvd) + recvd, err = store.GetIMSIForSessionID(nid1, sid1) + assert.NoError(t, err) + assert.Equal(t, imsi1, recvd) + + // Correctly network-partitioned: {nid0: sid0->imsi0, nid1: sid0->imsi1} + err = store.MapSessionIDsToIMSIs(nid0, map[string]string{sid0: imsi0}) + assert.NoError(t, err) + err = store.MapSessionIDsToIMSIs(nid1, map[string]string{sid0: imsi1}) + assert.NoError(t, err) + recvd, err = store.GetIMSIForSessionID(nid0, sid0) + assert.NoError(t, err) + assert.Equal(t, imsi0, recvd) + recvd, err = store.GetIMSIForSessionID(nid1, sid0) + assert.NoError(t, err) + assert.Equal(t, imsi1, recvd) } diff --git a/orc8r/cloud/go/services/directoryd/test_init/test_service_init.go b/orc8r/cloud/go/services/directoryd/test_init/test_service_init.go index c17ff23ff55c..97e8ed21f1ad 100644 --- a/orc8r/cloud/go/services/directoryd/test_init/test_service_init.go +++ b/orc8r/cloud/go/services/directoryd/test_init/test_service_init.go @@ -11,12 +11,35 @@ package test_init import ( "testing" + "magma/orc8r/cloud/go/blobstore" "magma/orc8r/cloud/go/orc8r" "magma/orc8r/cloud/go/services/directoryd" + 
"magma/orc8r/cloud/go/services/directoryd/servicers" + "magma/orc8r/cloud/go/services/directoryd/storage" + "magma/orc8r/cloud/go/sqorc" "magma/orc8r/cloud/go/test_utils" + "magma/orc8r/lib/go/protos" + + "github.com/stretchr/testify/assert" ) func StartTestService(t *testing.T) { + // Create service srv, lis := test_utils.NewTestService(t, orc8r.ModuleName, directoryd.ServiceName) + + // Init storage + db, err := sqorc.Open("sqlite3", ":memory:") + assert.NoError(t, err) + fact := blobstore.NewEntStorage(storage.DirectorydTableBlobstore, db, sqorc.GetSqlBuilder()) + err = fact.InitializeFactory() + assert.NoError(t, err) + store := storage.NewDirectorydBlobstore(fact) + + // Add servicers + servicer, err := servicers.NewDirectoryLookupServicer(store) + assert.NoError(t, err) + protos.RegisterDirectoryLookupServer(srv.GrpcServer, servicer) + + // Run service go srv.RunTest(lis) } diff --git a/orc8r/cloud/go/services/directoryd/types.go b/orc8r/cloud/go/services/directoryd/types.go index c1faaa21c8b5..2ef4390c1b90 100644 --- a/orc8r/cloud/go/services/directoryd/types.go +++ b/orc8r/cloud/go/services/directoryd/types.go @@ -9,11 +9,17 @@ package directoryd import ( "encoding/json" + "errors" + "fmt" + "strings" "github.com/go-openapi/strfmt" "github.com/go-openapi/validate" + "github.com/golang/glog" ) +const RecordKeySessionID = "session_id" + type DirectoryRecord struct { LocationHistory []string `json:"location_history"` @@ -42,3 +48,36 @@ func (m *DirectoryRecord) MarshalBinary() ([]byte, error) { func (m *DirectoryRecord) UnmarshalBinary(b []byte) error { return json.Unmarshal(b, m) } + +// GetSessionID returns the session ID stored in the directory record. +// If no session ID is found, returns empty string. 
+func (m *DirectoryRecord) GetSessionID() (string, error) { + if m.Identifiers == nil { + return "", errors.New("directory record's identifiers is nil") + } + + sid, ok := m.Identifiers[RecordKeySessionID] + if !ok { + return "", nil + } + + sidStr, ok := sid.(string) + if !ok { + return "", fmt.Errorf("failed to convert session ID value to string: %v", sid) + } + + glog.V(2).Infof("Full session ID: %s", sid) + strippedSid := stripIMSIFromSessionID(sidStr) + return strippedSid, nil +} + +// stripIMSIFromSessionID removes an IMSI prefix from the session ID. +// This exists for backwards compatibility -- in some cases the session ID +// is passed as a dash-separated concatenation of the IMSI and session ID, +// e.g. "IMSI156304337849371-155129". +func stripIMSIFromSessionID(sessionID string) string { + if strings.HasPrefix(sessionID, "IMSI") && strings.Contains(sessionID, "-") { + return strings.Split(sessionID, "-")[1] + } + return sessionID +} diff --git a/orc8r/cloud/go/services/dispatcher/client_api.go b/orc8r/cloud/go/services/dispatcher/client_api.go index 5cdbd35c8e92..f8beb4ca34c6 100644 --- a/orc8r/cloud/go/services/dispatcher/client_api.go +++ b/orc8r/cloud/go/services/dispatcher/client_api.go @@ -8,35 +8,4 @@ LICENSE file in the root directory of this source tree. package dispatcher -import ( - "context" - - "magma/orc8r/lib/go/errors" - "magma/orc8r/lib/go/protos" - platformregistry "magma/orc8r/lib/go/registry" - - "github.com/golang/glog" -) - const ServiceName = "DISPATCHER" - -// GetHostnameForHwid returns the controller hostname mapped for the hwid. -func GetHostnameForHwid(hwid string) (string, error) { - client, err := getDispatcherClient() - if err != nil { - return "", err - } - hostname, err := client.GetHostnameForHwid(context.Background(), &protos.HardwareID{Hwid: hwid}) - return hostname.GetName(), err -} - -// getDispatcherClient returns a new RPC client for the dispatcher service. 
-func getDispatcherClient() (protos.SyncRPCServiceClient, error) { - conn, err := platformregistry.GetConnection(ServiceName) - if err != nil { - initErr := errors.NewInitError(err, ServiceName) - glog.Error(initErr) - return nil, initErr - } - return protos.NewSyncRPCServiceClient(conn), nil -} diff --git a/orc8r/cloud/go/services/dispatcher/client_api_test.go b/orc8r/cloud/go/services/dispatcher/client_api_test.go deleted file mode 100644 index 947cc3b93e05..000000000000 --- a/orc8r/cloud/go/services/dispatcher/client_api_test.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - Copyright (c) Facebook, Inc. and its affiliates. - All rights reserved. - - This source code is licensed under the BSD-style license found in the - LICENSE file in the root directory of this source tree. -*/ - -package dispatcher_test - -import ( - "testing" - - "magma/orc8r/cloud/go/services/dispatcher" - "magma/orc8r/cloud/go/services/dispatcher/test_init" - - "github.com/stretchr/testify/assert" -) - -func TestGetHostnameForHwid(t *testing.T) { - test_init.StartTestService(t) - - // Values seeded during dispatcher test service init - hostname, err := dispatcher.GetHostnameForHwid("some_hwid_0") - assert.NoError(t, err) - assert.Equal(t, "some_hostname_0", hostname) -} diff --git a/orc8r/cloud/go/services/dispatcher/dispatcher/main.go b/orc8r/cloud/go/services/dispatcher/dispatcher/main.go index a9a959800c87..fa6aff5f285e 100644 --- a/orc8r/cloud/go/services/dispatcher/dispatcher/main.go +++ b/orc8r/cloud/go/services/dispatcher/dispatcher/main.go @@ -14,22 +14,19 @@ import ( "net/http" "os" - "magma/orc8r/cloud/go/blobstore" "magma/orc8r/cloud/go/orc8r" "magma/orc8r/cloud/go/service" - dstorage "magma/orc8r/cloud/go/services/directoryd/storage" "magma/orc8r/cloud/go/services/dispatcher" - sync_rpc_broker "magma/orc8r/cloud/go/services/dispatcher/broker" + syncRpcBroker "magma/orc8r/cloud/go/services/dispatcher/broker" "magma/orc8r/cloud/go/services/dispatcher/httpserver" 
"magma/orc8r/cloud/go/services/dispatcher/servicers" - "magma/orc8r/cloud/go/sqorc" "magma/orc8r/lib/go/protos" "github.com/golang/glog" "google.golang.org/grpc" ) -const HTTP_SERVER_PORT = 9080 +const HttpServerPort = 9080 func main() { // Set MaxConnectionAge to infinity so Sync RPC stream doesn't restart @@ -48,26 +45,14 @@ func main() { } // create a broker - broker := sync_rpc_broker.NewGatewayReqRespBroker() + broker := syncRpcBroker.NewGatewayReqRespBroker() // get ec2 public host name hostName := getHostName() glog.V(2).Infof("hostName is: %v\n", hostName) - // create storage - db, err := sqorc.Open(blobstore.SQLDriver, blobstore.DatabaseSource) - if err != nil { - glog.Fatalf("Failed to open db: %s", err) - } - fact := blobstore.NewEntStorage(dstorage.DirectorydTableBlobstore, db, sqorc.GetSqlBuilder()) - err = fact.InitializeFactory() - if err != nil { - glog.Fatalf("Failed to initialize storage factory: %s", err) - } - store := dstorage.NewDirectorydBlobstore(fact) - // create servicer - syncRpcServicer, err := servicers.NewSyncRPCService(hostName, broker, store) + syncRpcServicer, err := servicers.NewSyncRPCService(hostName, broker) if err != nil { glog.Fatalf("SyncRPCService Initialization Error: %s", err) } @@ -79,7 +64,7 @@ func main() { srv.GrpcServer.RegisterService(protos.GetLegacyDispatcherDesc(), syncRpcServicer) // run http server - go httpServer.Run(fmt.Sprintf(":%d", HTTP_SERVER_PORT)) + go httpServer.Run(fmt.Sprintf(":%d", HttpServerPort)) err = srv.Run() if err != nil { diff --git a/orc8r/cloud/go/services/dispatcher/gateway_registry/gw_registry.go b/orc8r/cloud/go/services/dispatcher/gateway_registry/gw_registry.go index afa4ba2b02c1..05790f8d53b4 100644 --- a/orc8r/cloud/go/services/dispatcher/gateway_registry/gw_registry.go +++ b/orc8r/cloud/go/services/dispatcher/gateway_registry/gw_registry.go @@ -14,10 +14,9 @@ import ( "sync" "time" - "magma/orc8r/cloud/go/services/dispatcher" + "magma/orc8r/cloud/go/services/directoryd" 
"magma/orc8r/lib/go/registry" - _ "github.com/mattn/go-sqlite3" "google.golang.org/grpc" "google.golang.org/grpc/metadata" ) @@ -79,7 +78,7 @@ func SetPort(port int) error { // of the Dispatcher grpc server who has an open bidirectional // stream with the gateway with hwId. func GetServiceAddressForGateway(hwId string) (string, error) { - hostName, err := dispatcher.GetHostnameForHwid(hwId) + hostName, err := directoryd.GetHostnameForHWID(hwId) if err != nil { fmt.Printf("err getting hostName in GetServiceAddressForGateway for hwId %v: %v\n", hwId, err) return "", err diff --git a/orc8r/cloud/go/services/dispatcher/servicers/sync_rpc_service.go b/orc8r/cloud/go/services/dispatcher/servicers/sync_rpc_service.go index b6a450ca5380..efe43eed1297 100644 --- a/orc8r/cloud/go/services/dispatcher/servicers/sync_rpc_service.go +++ b/orc8r/cloud/go/services/dispatcher/servicers/sync_rpc_service.go @@ -15,7 +15,7 @@ import ( "time" "magma/orc8r/cloud/go/identity" - dstorage "magma/orc8r/cloud/go/services/directoryd/storage" + "magma/orc8r/cloud/go/services/directoryd" "magma/orc8r/cloud/go/services/dispatcher/broker" "magma/orc8r/lib/go/protos" @@ -32,13 +32,10 @@ type SyncRPCService struct { // hostName is the host at which this service instance is running on hostName string broker broker.GatewayRPCBroker - store dstorage.DirectorydStorage } -func NewSyncRPCService( - hostName string, broker broker.GatewayRPCBroker, store dstorage.DirectorydStorage, -) (protos.SyncRPCServiceServer, error) { - return &SyncRPCService{hostName: hostName, broker: broker, store: store}, nil +func NewSyncRPCService(hostName string, broker broker.GatewayRPCBroker) (protos.SyncRPCServiceServer, error) { + return &SyncRPCService{hostName: hostName, broker: broker}, nil } // SyncRPC exists for backwards compatibility. 
@@ -65,19 +62,6 @@ func (srv *SyncRPCService) EstablishSyncRPCStream(stream protos.SyncRPCService_E return srv.serveGwId(stream, gw.HardwareId) } -func (srv *SyncRPCService) GetHostnameForHwid(ctx context.Context, hwid *protos.HardwareID) (*protos.Hostname, error) { - if hwid == nil || len(hwid.GetHwid()) == 0 { - return nil, status.Errorf(codes.InvalidArgument, "hwid argument is nil or empty") - } - - hostname, err := srv.store.GetHostname(hwid.Hwid) - if err != nil { - return nil, status.Errorf(codes.NotFound, "hostname not found for hwid") - } - - return &protos.Hostname{Name: hostname}, nil -} - // streamCoordinator manages a SyncRPC bidirectional stream. type streamCoordinator struct { GwID string @@ -192,7 +176,7 @@ func (srv *SyncRPCService) receiveFromStream( // Returning err indicates to end the bidirectional stream. func (srv *SyncRPCService) processSyncRPCResp(resp *protos.SyncRPCResponse, hwId string) error { if resp.HeartBeat { - err := srv.store.PutHostname(hwId, srv.hostName) + err := directoryd.MapHWIDToHostname(hwId, srv.hostName) if err != nil { // Cannot persist so nobody can send things to this // gateway use the stream, therefore return err to end the stream. 
diff --git a/orc8r/cloud/go/services/dispatcher/servicers/sync_rpc_service_test.go b/orc8r/cloud/go/services/dispatcher/servicers/sync_rpc_service_test.go index c0fc06990e8a..09f669ffe1f9 100644 --- a/orc8r/cloud/go/services/dispatcher/servicers/sync_rpc_service_test.go +++ b/orc8r/cloud/go/services/dispatcher/servicers/sync_rpc_service_test.go @@ -64,11 +64,6 @@ func TestSyncRPC(t *testing.T) { assert.NoError(t, err) syncRPCClient := protos.NewSyncRPCServiceClient(conn) - // Test GetHostnameForHwid -- values seeded during dispatcher test service init - hostname, err := syncRPCClient.GetHostnameForHwid(context.Background(), &protos.HardwareID{Hwid: "some_hwid_0"}) - assert.NoError(t, err) - assert.Equal(t, "some_hostname_0", hostname.Name) - stream, err := syncRPCClient.EstablishSyncRPCStream(context.Background()) assert.NoError(t, err) waitc := make(chan struct{}) diff --git a/orc8r/cloud/go/services/dispatcher/servicers/sync_rpc_test_service.go b/orc8r/cloud/go/services/dispatcher/servicers/sync_rpc_test_service.go index fb8feb727e16..5fcc83bf445a 100644 --- a/orc8r/cloud/go/services/dispatcher/servicers/sync_rpc_test_service.go +++ b/orc8r/cloud/go/services/dispatcher/servicers/sync_rpc_test_service.go @@ -9,7 +9,6 @@ LICENSE file in the root directory of this source tree. 
package servicers import ( - dstorage "magma/orc8r/cloud/go/services/directoryd/storage" "magma/orc8r/cloud/go/services/dispatcher/broker" "magma/orc8r/lib/go/protos" ) @@ -30,8 +29,6 @@ func (srv *testSyncRPCServer) EstablishSyncRPCStream(stream protos.SyncRPCServic return srv.SyncRPCService.EstablishSyncRPCStream(stream) } -func NewTestSyncRPCServer( - hostName string, broker broker.GatewayRPCBroker, store dstorage.DirectorydStorage, -) (*testSyncRPCServer, error) { - return &testSyncRPCServer{SyncRPCService{hostName, broker, store}}, nil +func NewTestSyncRPCServer(hostName string, broker broker.GatewayRPCBroker) (*testSyncRPCServer, error) { + return &testSyncRPCServer{SyncRPCService{hostName, broker}}, nil } diff --git a/orc8r/cloud/go/services/dispatcher/test_init/test_service_init.go b/orc8r/cloud/go/services/dispatcher/test_init/test_service_init.go index f048f3e5eef6..c1e78787dc77 100644 --- a/orc8r/cloud/go/services/dispatcher/test_init/test_service_init.go +++ b/orc8r/cloud/go/services/dispatcher/test_init/test_service_init.go @@ -11,35 +11,20 @@ package test_init import ( "testing" - "magma/orc8r/cloud/go/blobstore" "magma/orc8r/cloud/go/orc8r" - "magma/orc8r/cloud/go/services/directoryd/storage" "magma/orc8r/cloud/go/services/dispatcher" "magma/orc8r/cloud/go/services/dispatcher/broker/mocks" "magma/orc8r/cloud/go/services/dispatcher/servicers" - "magma/orc8r/cloud/go/sqorc" "magma/orc8r/cloud/go/test_utils" "magma/orc8r/lib/go/protos" _ "github.com/mattn/go-sqlite3" - "github.com/stretchr/testify/assert" ) func StartTestService(t *testing.T) *mocks.GatewayRPCBroker { - db, err := sqorc.Open("sqlite3", ":memory:") - assert.NoError(t, err) - fact := blobstore.NewEntStorage(storage.DirectorydTableBlobstore, db, sqorc.GetSqlBuilder()) - err = fact.InitializeFactory() - assert.NoError(t, err) - store := storage.NewDirectorydBlobstore(fact) - - // Seed store with hwid->hostname mapping for validation at the service level - err = 
store.PutHostname("some_hwid_0", "some_hostname_0") - assert.NoError(t, err) - srv, lis := test_utils.NewTestService(t, orc8r.ModuleName, dispatcher.ServiceName) mockBroker := new(mocks.GatewayRPCBroker) - servicer, err := servicers.NewTestSyncRPCServer("test host name", mockBroker, store) + servicer, err := servicers.NewTestSyncRPCServer("test host name", mockBroker) if err != nil { t.Fatalf("Failed to create syncRPCService servicer: %s", err) } diff --git a/orc8r/cloud/go/services/logger/client_api.go b/orc8r/cloud/go/services/logger/client_api.go deleted file mode 100644 index 8181775bc9a2..000000000000 --- a/orc8r/cloud/go/services/logger/client_api.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. -*/ - -package logger - -import ( - "math/rand" - - "magma/orc8r/lib/go/errors" - "magma/orc8r/lib/go/protos" - "magma/orc8r/lib/go/registry" - - "github.com/golang/glog" - "golang.org/x/net/context" -) - -const ServiceName = "LOGGER" - -// getLoggerClient is a utility function to get a RPC connection to the -// loggingService service -func getLoggerClient() (protos.LoggingServiceClient, error) { - conn, err := registry.GetConnection(ServiceName) - if err != nil { - initErr := errors.NewInitError(err, ServiceName) - glog.Error(initErr) - return nil, initErr - } - return protos.NewLoggingServiceClient(conn), err - -} - -// determines if we should log for the specific instance given samplingRate -func shouldLog(samplingRate float64) bool { - return rand.Float64() < samplingRate -} - -///////////////////////////////////// -// User call this directly to log // -//////////////////////////////////// -func LogEntriesToDest(entries []*protos.LogEntry, destination protos.LoggerDestination, samplingRate float64) error { - lg, err := getLoggerClient() - if err != nil { - return err - } - if 
!shouldLog(samplingRate) { - return nil - } - req := protos.LogRequest{Entries: entries, Destination: destination} - _, err = lg.Log(context.Background(), &req) - return err - -} - -// Log entries to Scribe with SamplingRate 1 (i.e. no sampling) -func LogToScribe(entries []*protos.LogEntry) error { - return LogEntriesToDest(entries, protos.LoggerDestination_SCRIBE, 1) -} - -// Log entries to Scribe with given samplingRate -func LogToScribeWithSamplingRate(entries []*protos.LogEntry, samplingRate float64) error { - return LogEntriesToDest(entries, protos.LoggerDestination_SCRIBE, samplingRate) -} diff --git a/orc8r/cloud/go/services/logger/client_test.go b/orc8r/cloud/go/services/logger/client_test.go deleted file mode 100644 index 5d872cb73eca..000000000000 --- a/orc8r/cloud/go/services/logger/client_test.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
-*/ - -package logger_test - -import ( - "reflect" - "testing" - "time" - - "github.com/golang/protobuf/proto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - - "magma/orc8r/cloud/go/services/logger" - "magma/orc8r/cloud/go/services/logger/test_init" - "magma/orc8r/lib/go/protos" -) - -func TestLoggingServiceClientMethods(t *testing.T) { - mockExporter := test_init.StartTestServiceWithMockExporterExposed(t) - scribeData := map[string]string{"url": "test_url", "ret_code": "test return code", "method": "GET(test)", "operator_info": "Txia dev test", - "client_ip": "123.456.789.123"} - - entry := &protos.LogEntry{Category: "perfpipe_magma_rest_api_stats", NormalMap: scribeData, Time: int64(time.Now().Unix())} - matchEntries := []*protos.LogEntry{entry} - entries := []*protos.LogEntry{proto.Clone(entry).(*protos.LogEntry)} - - mockExporter.On("Submit", mock.MatchedBy(logEntriesMatcher(matchEntries))).Return(nil) - err := logger.LogEntriesToDest(entries, protos.LoggerDestination_SCRIBE, 0) - assert.NoError(t, err) - mockExporter.AssertNotCalled(t, "Submit", entries) - - err = logger.LogEntriesToDest(entries, 1, 1) - assert.Error(t, err) - assert.EqualError(t, err, "rpc error: code = Unknown desc = LoggerDestination 1 not supported") - mockExporter.AssertNotCalled(t, "Submit", entries) - - mockExporter.On("Submit", mock.MatchedBy(logEntriesMatcher(matchEntries))).Return(nil) - err = logger.LogEntriesToDest(entries, protos.LoggerDestination_SCRIBE, 1) - assert.NoError(t, err) - mockExporter.AssertCalled(t, "Submit", mock.AnythingOfType("[]*protos.LogEntry")) -} - -func logEntriesMatcher(expected []*protos.LogEntry) interface{} { - return func(entries []*protos.LogEntry) bool { - cleanupEntries(expected) - cleanupEntries(entries) - return reflect.DeepEqual(entries, expected) - } -} - -func cleanupEntries(entries []*protos.LogEntry) { - for i, e := range entries { - if e != nil { - b, err := protos.Marshal(e) - if err == nil { - ce := 
&protos.LogEntry{} - err = protos.Unmarshal(b, ce) - if err == nil { - entries[i] = ce - } - } - } - } -} diff --git a/orc8r/cloud/go/services/logger/exporters/exporter.go b/orc8r/cloud/go/services/logger/exporters/exporter.go deleted file mode 100644 index 3907ea75773f..000000000000 --- a/orc8r/cloud/go/services/logger/exporters/exporter.go +++ /dev/null @@ -1,16 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. -*/ - -package exporters - -import "magma/orc8r/lib/go/protos" - -type Exporter interface { - // export logEntries - Submit(logEntries []*protos.LogEntry) error -} diff --git a/orc8r/cloud/go/services/logger/exporters/mocks/mock.go b/orc8r/cloud/go/services/logger/exporters/mocks/mock.go deleted file mode 100644 index cc3f645041a7..000000000000 --- a/orc8r/cloud/go/services/logger/exporters/mocks/mock.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
-*/ - -package mocks - -import ( - "encoding/json" - "fmt" - - "magma/orc8r/lib/go/protos" - - "github.com/stretchr/testify/mock" -) - -type mockExporter struct { -} - -func NewMockExporter() *mockExporter { - return &mockExporter{} -} - -// prints out the marshaled result of logEntries -func (exporter *mockExporter) Submit(logEntries []*protos.LogEntry) error { - logJson, err := json.Marshal(logEntries) - if err != nil { - return err - } - fmt.Printf("entries to Export in json: %v\n", string(logJson)) - return nil -} - -// can assert methods called on this exporter -type ExposedMockExporter struct { - mock.Mock -} - -func NewExposedMockExporter() *ExposedMockExporter { - return &ExposedMockExporter{} -} - -func (exporter *ExposedMockExporter) Submit(logEntries []*protos.LogEntry) error { - args := exporter.Called(logEntries) - fmt.Printf("\n\nSUBMIT: %+v\n", logEntries) - logJson, err := json.Marshal(logEntries) - if err != nil { - return args.Error(0) - } - fmt.Printf("entries to Export in json: %v\n", string(logJson)) - return args.Error(0) -} diff --git a/orc8r/cloud/go/services/logger/exporters/scribe.go b/orc8r/cloud/go/services/logger/exporters/scribe.go deleted file mode 100644 index cc3cdfb432dc..000000000000 --- a/orc8r/cloud/go/services/logger/exporters/scribe.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
-*/ - -package exporters - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "sync" - "time" - - "magma/orc8r/lib/go/protos" - - "github.com/golang/glog" -) - -type HttpClient interface { - PostForm(url string, data url.Values) (resp *http.Response, err error) -} - -type ScribeExporter struct { - scribeUrl string - appId string - appSecret string - queue []*ScribeLogEntry - queueMutex sync.RWMutex - queueLen int - exportInterval time.Duration -} - -func NewScribeExporter( - baseUrl string, - appId string, - appSecret string, - queueLen int, - exportInterval time.Duration, -) *ScribeExporter { - e := new(ScribeExporter) - e.scribeUrl = baseUrl - e.appId = appId - e.appSecret = appSecret - e.queueLen = queueLen - e.exportInterval = exportInterval - return e -} - -func (e *ScribeExporter) Start() { - go e.exportEvery() -} - -func (e *ScribeExporter) exportEvery() { - for _ = range time.Tick(e.exportInterval) { - client := http.DefaultClient - err := e.Export(client) - if err != nil { - glog.Errorf("Error in exporting to scribe: %v\n", err) - } - } -} - -// Write to Scribe -func (e *ScribeExporter) Export(client HttpClient) error { - e.queueMutex.RLock() - logs := e.queue - e.queueMutex.RUnlock() - if len(logs) != 0 { - err := e.write(client, logs) - if err != nil { - return fmt.Errorf("Failed to export to scribe: %v\n", err) - } - // write to ods successful, clear written logs from queue - e.queueMutex.Lock() - e.queue = e.queue[len(logs):] - e.queueMutex.Unlock() - } - return nil -} - -func (e *ScribeExporter) write(client HttpClient, logEntries []*ScribeLogEntry) error { - logJson, err := json.Marshal(logEntries) - if err != nil { - return err - } - accessToken := fmt.Sprintf("%s|%s", e.appId, e.appSecret) - resp, err := client.PostForm(e.scribeUrl, - url.Values{"access_token": {accessToken}, "logs": {string(logJson)}}) - if err != nil { - return err - } - - if resp.StatusCode != 200 { - errMsg, _ := ioutil.ReadAll(resp.Body) - 
resp.Body.Close() - err = fmt.Errorf("Scribe status code %d: %s", resp.StatusCode, errMsg) - } - return err -} - -func (e *ScribeExporter) Submit(logEntries []*protos.LogEntry) error { - e.queueMutex.Lock() - defer e.queueMutex.Unlock() - if (len(e.queue) + len(logEntries)) > e.queueLen { - // queue is full, clear queue and log that queue was full - e.queue = []*ScribeLogEntry{} - glog.Warningf("Queue is full, clearing...") - if len(logEntries) > e.queueLen { - return fmt.Errorf("dropping %v logEntries as it exceeds max queue length", len(logEntries)) - } - } - scribeEntries, err := ConvertToScribeLogEntries(logEntries) - if err != nil { - return err - } - e.queue = append(e.queue, scribeEntries...) - return nil -} diff --git a/orc8r/cloud/go/services/logger/exporters/scribe_entry_utils.go b/orc8r/cloud/go/services/logger/exporters/scribe_entry_utils.go deleted file mode 100644 index 47636bb52bc6..000000000000 --- a/orc8r/cloud/go/services/logger/exporters/scribe_entry_utils.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. -*/ - -package exporters - -import ( - "encoding/json" - "fmt" - - "magma/orc8r/cloud/go/services/configurator" - "magma/orc8r/lib/go/protos" - - "github.com/golang/glog" -) - -type ScribeLogEntry struct { - Category string `json:"category"` - Message string `json:"message"` -} - -type ScribeLogMessage struct { - Int map[string]int64 `json:"int,omitempty"` - Normal map[string]string `json:"normal,omitempty"` - TagSet []string `json:"tagset,omitempty"` - NormVec []string `json:"normvector,omitempty"` -} - -//convert a slice of protos.LogEntry into a slice of ScribeLogMessage. -// Add networkId and gatewayId into normal map of ScribeLogEntry if -// the original LogEntry had a valid hardware_id. 
-func ConvertToScribeLogEntries(entries []*protos.LogEntry) ([]*ScribeLogEntry, error) { - scribeEntries := []*ScribeLogEntry{} - for _, entry := range entries { - if entry.Time == 0 { - return nil, fmt.Errorf("ScribeLogEntry %v doesn't have time field set", entry) - } - scribeMsg := ScribeLogMessage{} - // if any of the following fields are nil, they will be omitted when scribeMsg is marshalled into json. - scribeMsg.Normal = entry.NormalMap - scribeMsg.Int = entry.IntMap - scribeMsg.TagSet = entry.TagSet - scribeMsg.NormVec = entry.Normvector - // append Time field to the int map - if scribeMsg.Int == nil { - scribeMsg.Int = map[string]int64{} - } - scribeMsg.Int["time"] = entry.Time - // add gatewayId and networkId if it's a logEntry logged from a gateway - if len(entry.HwId) != 0 { - networkID, gatewayID, err := configurator.GetNetworkAndEntityIDForPhysicalID(entry.HwId) - if err != nil { - glog.Errorf("Error retrieving nwId and gwId for hwId %s in scribeExporter: %v\n", entry.HwId, err) - } - if scribeMsg.Normal == nil { - scribeMsg.Normal = map[string]string{} - } - scribeMsg.Normal["networkId"] = networkID - scribeMsg.Normal["gatewayId"] = gatewayID - } - // marshall scribeMsg into json - msgJson, err := json.Marshal(scribeMsg) - if err != nil { - glog.Errorf("Error formatting scribeMsg %v in scribeExporter: %v\n", scribeMsg, err) - continue - } - scribeEntries = append(scribeEntries, &ScribeLogEntry{Category: entry.Category, Message: string(msgJson)}) - } - return scribeEntries, nil -} diff --git a/orc8r/cloud/go/services/logger/exporters/scribe_entry_utils_test.go b/orc8r/cloud/go/services/logger/exporters/scribe_entry_utils_test.go deleted file mode 100644 index 2f9533fd2f74..000000000000 --- a/orc8r/cloud/go/services/logger/exporters/scribe_entry_utils_test.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. 
- -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. -*/ - -package exporters_test - -import ( - "testing" - - "magma/orc8r/cloud/go/orc8r" - "magma/orc8r/cloud/go/pluginimpl/models" - "magma/orc8r/cloud/go/serde" - configuratorti "magma/orc8r/cloud/go/services/configurator/test_init" - configuratortu "magma/orc8r/cloud/go/services/configurator/test_utils" - "magma/orc8r/cloud/go/services/device" - deviceti "magma/orc8r/cloud/go/services/device/test_init" - "magma/orc8r/cloud/go/services/logger/exporters" - "magma/orc8r/lib/go/protos" - - "github.com/stretchr/testify/assert" -) - -func TestScribeEntryUtils(t *testing.T) { - logEntries := []*protos.LogEntry{ - { - Category: "test", - NormalMap: map[string]string{"status": "ACTIVE"}, - IntMap: map[string]int64{"port": 443}, - Time: 12345, - }, - } - scribeEntries, err := exporters.ConvertToScribeLogEntries(logEntries) - assert.NoError(t, err) - assert.Equal(t, 1, len(scribeEntries)) - assert.Equal(t, logEntries[0].Category, scribeEntries[0].Category) - expectedMsg := "{\"int\":{\"port\":443,\"time\":12345},\"normal\":{\"status\":\"ACTIVE\"}}" - assert.Equal(t, expectedMsg, scribeEntries[0].Message) -} - -func TestScribeEntryUtils_WithHWID(t *testing.T) { - configuratorti.StartTestService(t) - deviceti.StartTestService(t) - _ = serde.RegisterSerdes(serde.NewBinarySerde(device.SerdeDomain, orc8r.AccessGatewayRecordType, &models.GatewayDevice{})) - - networkID := "test_network" - gatewayID := "test_gateway" - hwID := "test_hwID" - configuratortu.RegisterNetwork(t, networkID, "") - configuratortu.RegisterGateway(t, networkID, gatewayID, &models.GatewayDevice{HardwareID: hwID}) - - logEntries := []*protos.LogEntry{ - { - Category: "test", - NormalMap: map[string]string{"status": "ACTIVE"}, - IntMap: map[string]int64{"port": 443}, - Time: 12345, - HwId: hwID, - }, - } - scribeEntries, err := exporters.ConvertToScribeLogEntries(logEntries) - 
assert.NoError(t, err) - assert.Equal(t, 1, len(scribeEntries)) - assert.Equal(t, logEntries[0].Category, scribeEntries[0].Category) - expectedMsg := "{\"int\":{\"port\":443,\"time\":12345},\"normal\":{\"gatewayId\":\"test_gateway\",\"networkId\":\"test_network\",\"status\":\"ACTIVE\"}}" - assert.Equal(t, expectedMsg, scribeEntries[0].Message) -} diff --git a/orc8r/cloud/go/services/logger/exporters/scribe_test.go b/orc8r/cloud/go/services/logger/exporters/scribe_test.go deleted file mode 100644 index 93bb5e4116c0..000000000000 --- a/orc8r/cloud/go/services/logger/exporters/scribe_test.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. -*/ - -package exporters_test - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - "testing" - "time" - - "magma/orc8r/cloud/go/services/logger/exporters" - "magma/orc8r/lib/go/protos" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) - -type MockClient struct { - mock.Mock -} - -func (client *MockClient) PostForm(url string, data url.Values) (resp *http.Response, err error) { - args := client.Called(url, data) - return args.Get(0).(*http.Response), args.Error(1) -} - -func TestScribeExporter_Submit(t *testing.T) { - exporter := exporters.NewScribeExporter( - "", - "", - "", - 5, - time.Second*10, - ) - logEntries := []*protos.LogEntry{{Category: "test"}, {Category: "test2"}} - err := exporter.Submit(logEntries) - assert.EqualError(t, err, fmt.Sprintf("ScribeLogEntry %v doesn't have time field set", logEntries[0])) - logEntries = []*protos.LogEntry{{Category: "test1", Time: 12345}, {Category: "test2", Time: 23456}} - err = exporter.Submit(logEntries) - assert.NoError(t, err) - err = exporter.Submit(logEntries) - assert.NoError(t, err) - // submiting when queue is full should give error - err = 
exporter.Submit(logEntries) - assert.NoError(t, err) // queue is cleared - logEntries = []*protos.LogEntry{ - {Category: "test1", Time: 12345}, - {Category: "test2", Time: 23456}, - {Category: "test3", Time: 23457}, - {Category: "test4", Time: 23458}, - {Category: "test5", Time: 23459}, - {Category: "test6", Time: 23460}, - } - err = exporter.Submit(logEntries) - assert.EqualError(t, err, fmt.Sprintf("dropping %v logEntries as it exceeds max queue length", len(logEntries))) -} - -func TestScribeExporter_Export(t *testing.T) { - exporter := exporters.NewScribeExporter( - "", - "", - "", - 2, - time.Second*10, - ) - client := new(MockClient) - resp := &http.Response{StatusCode: 200} - logEntries := []*protos.LogEntry{{Category: "test1", Time: 12345}, {Category: "test2", Time: 23456}} - scribeEntries, err := exporters.ConvertToScribeLogEntries(logEntries) - assert.NoError(t, err) - logJson, err := json.Marshal(scribeEntries) - assert.NoError(t, err) - client.On("PostForm", mock.AnythingOfType("string"), url.Values{"access_token": {"|"}, "logs": {string(logJson)}}).Return(resp, nil) - err = exporter.Export(client) - assert.NoError(t, err) - client.AssertNotCalled(t, "PostForm", mock.AnythingOfType("string"), mock.AnythingOfType("url.Values")) - err = exporter.Submit(logEntries) - assert.NoError(t, err) - err = exporter.Export(client) - assert.NoError(t, err) - client.AssertExpectations(t) -} diff --git a/orc8r/cloud/go/services/logger/logger/main.go b/orc8r/cloud/go/services/logger/logger/main.go deleted file mode 100644 index 1f3a7391fc80..000000000000 --- a/orc8r/cloud/go/services/logger/logger/main.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
-*/ - -package main - -import ( - "flag" - "time" - - "magma/orc8r/cloud/go/orc8r" - "magma/orc8r/cloud/go/service" - "magma/orc8r/cloud/go/services/logger" - "magma/orc8r/cloud/go/services/logger/exporters" - "magma/orc8r/cloud/go/services/logger/nghttpxlogger" - "magma/orc8r/cloud/go/services/logger/servicers" - "magma/orc8r/lib/go/protos" - - "github.com/golang/glog" -) - -const ( - SCRIBE_EXPORTER_EXPORT_INTERVAL = time.Second * 60 - SCRIBE_EXPORTER_QUEUE_LENGTH = 100000 - NGHTTPX_LOG_FILE_PATH = "/var/log/nghttpx.log" -) - -var ( - tailNghttpx = flag.Bool("tailNghttpx", false, "Tail Nghttpx Logs and export") -) - -func main() { - // Create the service - srv, err := service.NewOrchestratorService(orc8r.ModuleName, logger.ServiceName) - if err != nil || srv.Config == nil { - glog.Fatalf("Error creating service: %s", err) - } - - if *tailNghttpx { - // run nghttpxlogger on its own goroutine - parser, err := nghttpxlogger.NewNghttpParser() - if err != nil { - glog.Fatalf("Error creating Nghttp Parser: %v\n", err) - } - nghttpxLogger, err := nghttpxlogger.NewNghttpLogger(time.Minute, parser) - if err != nil { - glog.Fatalf("Error creating Nghttp Logger: %v\n", err) - } - glog.V(2).Infof("Running nghttpxlogger...\n") - nghttpxLogger.Run(NGHTTPX_LOG_FILE_PATH) - } - - scribeExportURL := srv.Config.GetRequiredStringParam("scribe_export_url") - scribeAppID := srv.Config.GetRequiredStringParam("scribe_app_id") - scribeAppSecret := srv.Config.GetRequiredStringParam("scribe_app_secret") - - // Initialize exporters - scribeExporter := exporters.NewScribeExporter( - scribeExportURL, - scribeAppID, - scribeAppSecret, - SCRIBE_EXPORTER_QUEUE_LENGTH, - SCRIBE_EXPORTER_EXPORT_INTERVAL, - ) - logExporters := make(map[protos.LoggerDestination]exporters.Exporter) - logExporters[protos.LoggerDestination_SCRIBE] = scribeExporter - - // Add servicers to the service - loggingServ, err := servicers.NewLoggingService(logExporters) - if err != nil { - glog.Fatalf("LoggingService 
Initialization Error: %s", err) - } - // start exporting asynchronously - scribeExporter.Start() - - protos.RegisterLoggingServiceServer(srv.GrpcServer, loggingServ) - srv.GrpcServer.RegisterService(protos.GetLegacyLoggerDesc(), loggingServ) - // Run the service - err = srv.Run() - if err != nil { - glog.Fatalf("Error running service: %s", err) - } -} diff --git a/orc8r/cloud/go/services/logger/nghttpxlogger/nghttpx_logger.go b/orc8r/cloud/go/services/logger/nghttpxlogger/nghttpx_logger.go deleted file mode 100644 index 6d8ed8321886..000000000000 --- a/orc8r/cloud/go/services/logger/nghttpxlogger/nghttpx_logger.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. -*/ - -package nghttpxlogger - -import ( - "time" - - "magma/orc8r/cloud/go/services/logger" - "magma/orc8r/lib/go/protos" - - "github.com/golang/glog" - "github.com/hpcloud/tail" -) - -const ( - NGHTTPX_SCRIBE_CATEGORY = "perfpipe_magma_rest_api_stats" - SAMPLING_RATE = 1 -) - -type NghttpxLogger struct { - readInterval time.Duration - parser NghttpxParser -} - -// Nghttpx log message is in the following format: -//${time_iso8601}@|@${remote_addr}@|@${http_host}@|@${server_port}@|@${request}@|@${status}@|@${body_bytes_sent}bytes@|@${request_time}ms -// Scribe requires int and string(normal) messages to be separated -type NghttpxMessage struct { - Int map[string]int64 `json:"int"` - Normal map[string]string `json:"normal"` - Time int64 `json:"time"` -} - -// Returns a NghttpxLogger -// Potential loss of data: -// When logger restarts, the logger will drop all log entries which happened while it was down -func NewNghttpLogger(readInterval time.Duration, parser NghttpxParser) (*NghttpxLogger, error) { - return &NghttpxLogger{readInterval: readInterval, parser: parser}, nil -} - -func (nghttpxlogger *NghttpxLogger) 
Run(filepath string) { - go nghttpxlogger.tail(filepath) -} - -func (nghttpxlogger *NghttpxLogger) tail(filepath string) { - t, err := tail.TailFile(filepath, tail.Config{Poll: true, Follow: true}) - if err != nil { - glog.Errorf("Error opening file %v for tailing: %v\n", filepath, err) - return - } - for line := range t.Lines { - msg, err := nghttpxlogger.parser.Parse(line.Text) - if err != nil { - glog.Errorf("err parsing %s in nghttpx.log: %v\n", line.Text, err) - continue - } - - entries := []*protos.LogEntry{{ - Category: NGHTTPX_SCRIBE_CATEGORY, - NormalMap: msg.Normal, - IntMap: msg.Int, - Time: msg.Time, - }} - // TODO: change to a lower samplingRate as we see fit - err = logger.LogToScribeWithSamplingRate(entries, SAMPLING_RATE) - if err != nil { - glog.Errorf("err sending nghttpx log: %v\n", err) - } - } -} diff --git a/orc8r/cloud/go/services/logger/nghttpxlogger/nghttpx_logger_test.go b/orc8r/cloud/go/services/logger/nghttpxlogger/nghttpx_logger_test.go deleted file mode 100644 index 8b37571ebca7..000000000000 --- a/orc8r/cloud/go/services/logger/nghttpxlogger/nghttpx_logger_test.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
-*/ - -package nghttpxlogger_test - -import ( - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" - "sync" - "testing" - "time" - - "magma/orc8r/cloud/go/services/logger/nghttpxlogger" - "magma/orc8r/cloud/go/services/logger/test_init" - "magma/orc8r/lib/go/protos" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) - -type mockParser struct { -} - -func (parser *mockParser) Parse(str string) (*nghttpxlogger.NghttpxMessage, error) { - res := strings.Split(str, " ") - status := strings.Trim(res[0], "\x00") - time, err := strconv.Atoi(res[1]) - if err != nil { - time = 123456 - } - return &nghttpxlogger.NghttpxMessage{ - Time: int64(time), - Normal: map[string]string{"status": status}}, - nil -} - -func TestNghttpxLogger_Run(t *testing.T) { - mockExporter := test_init.StartTestServiceWithMockExporterExposed(t) - mockExporter.On("Submit", mock.AnythingOfType("[]*protos.LogEntry")).Return(nil) - parser := mockParser{} - - logger, err := nghttpxlogger.NewNghttpLogger(time.Second, &parser) - assert.NoError(t, err) - // create temp file - f, err := ioutil.TempFile("", "nghttpxlogger-test-") - assert.NoError(t, err) - fileName := f.Name() - defer func() { - r := recover() - _ = f.Close() - _ = os.Remove(fileName) - if r != nil { - panic(r) - } - }() - - // start tailing logfile, logrotate after 3 seconds - // a bit of a hack to prevent timing races with this test - we'll use a - // mutex to prevent truncation from happening at the same time as a write - // in real life, copytruncate log rotation could probably result in the - // tailer losing a line, but we are ok with this. - l := &sync.Mutex{} - logger.Run(fileName) - go SimulateLogRotation(t, fileName, l) - - // write lines to file - for i := 0; i < 6; i++ { - // this line has to be in format " " for testing purpose. See Parse() on mockParser. 
- l.Lock() - _, err := f.Write([]byte(fmt.Sprintf("testLine %v\n", i+1))) - l.Unlock() - assert.NoError(t, err) - time.Sleep(time.Second) - } - - // assert - for i := 0; i < 6; i++ { - msg := nghttpxlogger.NghttpxMessage{Time: int64(i + 1), Normal: map[string]string{"status": "testLine"}} - logEntries := []*protos.LogEntry{ - { - Category: "perfpipe_magma_rest_api_stats", - NormalMap: msg.Normal, - Time: msg.Time, - }, - } - mockExporter.AssertCalled(t, "Submit", logEntries) - } - mockExporter.AssertNumberOfCalls(t, "Submit", 6) - mockExporter.AssertExpectations(t) -} - -func SimulateLogRotation(t *testing.T, fileName string, lock *sync.Mutex) { - lock.Lock() - defer lock.Unlock() - - time.Sleep(3 * time.Second) - //copytruncate is used for logrotation for nghttpx.log - err := os.Truncate(fileName, 0) - assert.NoError(t, err) -} diff --git a/orc8r/cloud/go/services/logger/nghttpxlogger/nghttpx_parser.go b/orc8r/cloud/go/services/logger/nghttpxlogger/nghttpx_parser.go deleted file mode 100644 index 3b4f384ee246..000000000000 --- a/orc8r/cloud/go/services/logger/nghttpxlogger/nghttpx_parser.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
-*/ - -package nghttpxlogger - -import ( - "fmt" - "strconv" - "strings" - "time" - - "github.com/golang/glog" -) - -type NghttpxParser interface { - Parse(str string) (*NghttpxMessage, error) -} - -type NghttpxParserImpl struct { -} - -//${time_iso8601}@|@${remote_addr}@|@${http_host}@|@${server_port}@|@${request} -//@|@${status}@|@${body_bytes_sent}@|@${request_time}@|@${alpn}@|@ -//${tls_client_serial}@|@${tls_client_subject_name}@|@${tls_session_reused}@|@ -//${tls_sni}@|@${tls_protocol}@|@${tls_cipher}@|@${backend_host} -//@|@${backend_port} -const ( - DELIMITER = "@|@" - NUM_OF_FIELDS = 17 // num of variables in the original string to parse -) - -func NewNghttpParser() (*NghttpxParserImpl, error) { - return &NghttpxParserImpl{}, nil -} - -func (parser *NghttpxParserImpl) Parse(str string) (*NghttpxMessage, error) { - str = strings.Trim(str, "\x00") - res := strings.Split(str, DELIMITER) - if len(res) < NUM_OF_FIELDS { - return nil, fmt.Errorf("Expected # of fields:%v, got: %v", NUM_OF_FIELDS, len(res)) - } - - builder := NewNghttpxScribeDataBuilder() - intMsg, normalMsg, time, errs := builder. - Time(res[0]). - StringField("client_ip", res[1]). - StringField("http_host", res[2]). - IntField("server_port", res[3]). - ClientRequest(res[4]). - StringField("status", res[5]). - IntField("body_bytes_sent", res[6]). - RequestTime(res[7]). - StringField("alpn", res[8]). - StringField("tls_client_serial", res[9]). - StringField("tls_client_subject_name", res[10]). - StringField("tls_session_reused", res[11]). - StringField("tls_sni", res[12]). - StringField("tls_protocol", res[13]). - StringField("tls_cipher", res[14]). - StringField("backend_host", res[15]). - IntField("backend_port", res[16]). 
- Build() - - msg := NghttpxMessage{Normal: normalMsg, Int: intMsg, Time: time} - if len(errs) != 0 { - return nil, errs[0] - } - return &msg, nil -} - -type NghttpxScribeDataBuilder struct { - normalMsg map[string]string - intMsg map[string]int64 - time int64 - errors []error -} - -func NewNghttpxScribeDataBuilder() *NghttpxScribeDataBuilder { - return &NghttpxScribeDataBuilder{ - normalMsg: map[string]string{}, - intMsg: map[string]int64{}, - errors: []error{}, - } -} - -func (builder *NghttpxScribeDataBuilder) Time(token string) *NghttpxScribeDataBuilder { - t, err := time.Parse(time.RFC3339, token) - if err != nil { - builder.errors = append(builder.errors, fmt.Errorf("Error parsing time: %v", err)) - } else { - builder.time = int64(t.Unix()) - } - return builder -} - -func (builder *NghttpxScribeDataBuilder) StringField(name string, value string) *NghttpxScribeDataBuilder { - builder.normalMsg[name] = value - return builder -} - -func (builder *NghttpxScribeDataBuilder) IntField(name string, value string) *NghttpxScribeDataBuilder { - if value == "-" { - glog.V(2).Infof("Cannot parse %s field with value: %s, ignored", name, value) - return builder - } - - intValue, err := strconv.Atoi(value) - if err != nil { - builder.errors = append(builder.errors, fmt.Errorf("Error parsing %s: %v", name, err)) - } else { - builder.intMsg[name] = int64(intValue) - } - return builder -} - -func (builder *NghttpxScribeDataBuilder) ClientRequest(token string) *NghttpxScribeDataBuilder { - // parse request, setting RequestMethod and RequestUrl - res := strings.Split(token, " ") - if len(res) != 3 { - glog.V(2).Infof("Cannot parse client_request field with value: %s, ignored", token) - } else { - builder.normalMsg["request_method"] = res[0] - builder.normalMsg["request_url"] = res[1] - } - return builder -} - -func (builder *NghttpxScribeDataBuilder) RequestTime(token string) *NghttpxScribeDataBuilder { - reqT, err := strconv.ParseFloat(token, 64) - if err != nil { - 
glog.V(2).Infof("Cannot parse request_time field with value: %s, ignored", token) - } else { - builder.intMsg["request_time_micro_secs"] = int64(reqT * 1000) - } - return builder -} - -func (builder *NghttpxScribeDataBuilder) Build() (map[string]int64, map[string]string, int64, []error) { - return builder.intMsg, builder.normalMsg, builder.time, builder.errors -} diff --git a/orc8r/cloud/go/services/logger/nghttpxlogger/nghttpx_parser_test.go b/orc8r/cloud/go/services/logger/nghttpxlogger/nghttpx_parser_test.go deleted file mode 100644 index db62720fa9a5..000000000000 --- a/orc8r/cloud/go/services/logger/nghttpxlogger/nghttpx_parser_test.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. -*/ - -package nghttpxlogger_test - -import ( - "testing" - - "magma/orc8r/cloud/go/services/logger/nghttpxlogger" - - "github.com/stretchr/testify/assert" -) - -func TestNghttpxParser_Parse(t *testing.T) { - str := "2018-05-11T16:49:10.657Z@|@192.168.80.1@|@192.168.80.10:9443@|@" + - "9443@|@GET / HTTP/2@|@200@|@45@|@0.005@|@h2@|@" + - "fcd2f92750744ead7921876870c3c277@|@CN=test_operator,OU=,O=,C=US@|@r@|@" + - "-@|@TLSv1.2@|@ECDHE-RSA-AES256-GCM-SHA384@|@127.0.0.1@|@9081" - parser, err := nghttpxlogger.NewNghttpParser() - assert.NoError(t, err) - msg, err := parser.Parse(str) - assert.NoError(t, err) - assert.Equal(t, 4, len(msg.Int)) - assert.Equal(t, 13, len(msg.Normal)) - - assert.Equal(t, msg.Time, int64(1526057350)) - assert.Equal(t, msg.Int["server_port"], int64(9443)) - assert.Equal(t, msg.Int["backend_port"], int64(9081)) - assert.Equal(t, msg.Int["body_bytes_sent"], int64(45)) - assert.Equal(t, msg.Int["request_time_micro_secs"], int64(5)) - assert.Equal(t, msg.Normal["alpn"], "h2") - assert.Equal(t, msg.Normal["backend_host"], "127.0.0.1") - assert.Equal(t, msg.Normal["client_ip"], 
"192.168.80.1") - assert.Equal(t, msg.Normal["http_host"], "192.168.80.10:9443") - assert.Equal(t, msg.Normal["request_method"], "GET") - assert.Equal(t, msg.Normal["request_url"], "/") - assert.Equal(t, msg.Normal["status"], "200") - assert.Equal(t, msg.Normal["tls_cipher"], "ECDHE-RSA-AES256-GCM-SHA384") - assert.Equal(t, msg.Normal["tls_client_serial"], "fcd2f92750744ead7921876870c3c277") - assert.Equal(t, msg.Normal["tls_client_subject_name"], "CN=test_operator,OU=,O=,C=US") - assert.Equal(t, msg.Normal["tls_protocol"], "TLSv1.2") - assert.Equal(t, msg.Normal["tls_session_reused"], "r") - assert.Equal(t, msg.Normal["tls_sni"], "-") -} - -func TestNghttpxParser_ParseSuccessWithWrongFormat(t *testing.T) { - // request_time is populated with "-" instead of a valid number - // backend_port is populated with "-" instead of a valid number - // client_requst is populated with wrong format which does not have space as the delimiter - // Those fields should be ignored and parsed without any issue - str := "2018-05-11T16:49:10.657Z@|@192.168.80.1@|@192.168.80.10:9443@|@" + - "9443@|@GET/HTTP/2@|@200@|@45@|@-@|@h2@|@" + - "fcd2f92750744ead7921876870c3c277@|@CN=test_operator,OU=,O=,C=US@|@r@|@" + - "-@|@TLSv1.2@|@ECDHE-RSA-AES256-GCM-SHA384@|@127.0.0.1@|@-" - - parser, err := nghttpxlogger.NewNghttpParser() - assert.NoError(t, err) - msg, err := parser.Parse(str) - assert.NoError(t, err) - assert.Equal(t, 2, len(msg.Int)) - assert.Equal(t, 11, len(msg.Normal)) - - assert.Equal(t, msg.Time, int64(1526057350)) - assert.Equal(t, msg.Int["server_port"], int64(9443)) - assert.Equal(t, msg.Int["body_bytes_sent"], int64(45)) - assert.Equal(t, msg.Normal["alpn"], "h2") - assert.Equal(t, msg.Normal["backend_host"], "127.0.0.1") - assert.Equal(t, msg.Normal["client_ip"], "192.168.80.1") - assert.Equal(t, msg.Normal["http_host"], "192.168.80.10:9443") - assert.Equal(t, msg.Normal["status"], "200") - assert.Equal(t, msg.Normal["tls_cipher"], "ECDHE-RSA-AES256-GCM-SHA384") - 
assert.Equal(t, msg.Normal["tls_client_serial"], "fcd2f92750744ead7921876870c3c277") - assert.Equal(t, msg.Normal["tls_client_subject_name"], "CN=test_operator,OU=,O=,C=US") - assert.Equal(t, msg.Normal["tls_protocol"], "TLSv1.2") - assert.Equal(t, msg.Normal["tls_session_reused"], "r") - assert.Equal(t, msg.Normal["tls_sni"], "-") -} - -func TestNghttpxParser_ParseFail(t *testing.T) { - // timestamp is not in the correct format - str := "2018657Z@|@192.168.80.1@|@192.168.80.10:9443@|@" + - "9443@|@GET / HTTP/2@|@200@|@45@|@0.005@|@h2@|@" + - "fcd2f92750744ead7921876870c3c277@|@CN=test_operator,OU=,O=,C=US@|@r@|@" + - "-@|@TLSv1.2@|@ECDHE-RSA-AES256-GCM-SHA384@|@127.0.0.1@|@9081" - parser, err := nghttpxlogger.NewNghttpParser() - assert.NoError(t, err) - _, err = parser.Parse(str) - assert.EqualError(t, err, "Error parsing time: parsing time \"2018657Z\" as \"2006-01-02T15:04:05Z07:00\": cannot parse \"657Z\" as \"-\"") -} diff --git a/orc8r/cloud/go/services/logger/servicers/logging_service.go b/orc8r/cloud/go/services/logger/servicers/logging_service.go deleted file mode 100644 index 3517ebcedc40..000000000000 --- a/orc8r/cloud/go/services/logger/servicers/logging_service.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
-*/ - -package servicers - -import ( - "errors" - "fmt" - - "magma/orc8r/cloud/go/services/logger/exporters" - "magma/orc8r/lib/go/protos" - - "golang.org/x/net/context" -) - -type LoggingService struct { - exporters map[protos.LoggerDestination]exporters.Exporter -} - -func NewLoggingService(exporters map[protos.LoggerDestination]exporters.Exporter) (*LoggingService, error) { - if exporters == nil { - return nil, errors.New("exporters cannot be nil") - } - return &LoggingService{exporters: exporters}, nil -} - -// Invoke the right exporter to export logMessages based on the loggerDestination specified by the client. -// Input: LogRequest which specifies a loggerDestination and a slice of logEntries -// Output: error if any -func (srv *LoggingService) Log(ctx context.Context, request *protos.LogRequest) (*protos.Void, error) { - if request == nil { - return new(protos.Void), errors.New("Empty LogRequest") - } - exporter, ok := srv.exporters[request.Destination] - if !ok { - return new(protos.Void), - fmt.Errorf("LoggerDestination %v not supported", request.Destination) - } - err := exporter.Submit(request.Entries) - return new(protos.Void), err -} diff --git a/orc8r/cloud/go/services/logger/test_init/test_service_init.go b/orc8r/cloud/go/services/logger/test_init/test_service_init.go deleted file mode 100644 index e731db0f3a2b..000000000000 --- a/orc8r/cloud/go/services/logger/test_init/test_service_init.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
-*/ - -package test_init - -import ( - "testing" - - "magma/orc8r/cloud/go/orc8r" - "magma/orc8r/cloud/go/services/logger" - "magma/orc8r/cloud/go/services/logger/exporters" - "magma/orc8r/cloud/go/services/logger/exporters/mocks" - "magma/orc8r/cloud/go/services/logger/servicers" - "magma/orc8r/cloud/go/test_utils" - "magma/orc8r/lib/go/protos" -) - -// this test init does not expose mockExporter, and caller does not do any handling -func StartTestService(t *testing.T) { - srv, lis := test_utils.NewTestService(t, orc8r.ModuleName, logger.ServiceName) - logExporters := make(map[protos.LoggerDestination]exporters.Exporter) - logExporters[protos.LoggerDestination_SCRIBE] = mocks.NewMockExporter() - loggingSrv, err := servicers.NewLoggingService(logExporters) - if err != nil { - t.Fatalf("Failed to create LoggingService") - } - protos.RegisterLoggingServiceServer( - srv.GrpcServer, - loggingSrv) - go srv.RunTest(lis) -} - -// this test init exposes mockExporter, but caller needs to define .on("Submit", <[]*LogEntry>).Return() -func StartTestServiceWithMockExporterExposed(t *testing.T) *mocks.ExposedMockExporter { - srv, lis := test_utils.NewTestService(t, orc8r.ModuleName, logger.ServiceName) - logExporters := make(map[protos.LoggerDestination]exporters.Exporter) - exporter := mocks.NewExposedMockExporter() - logExporters[protos.LoggerDestination_SCRIBE] = exporter - loggingSrv, err := servicers.NewLoggingService(logExporters) - if err != nil { - t.Fatalf("Failed to create LoggingService") - } - protos.RegisterLoggingServiceServer( - srv.GrpcServer, - loggingSrv) - go srv.RunTest(lis) - return exporter -} diff --git a/orc8r/cloud/go/services/magmad/gateway_api.go b/orc8r/cloud/go/services/magmad/gateway_api.go index 71c70455f534..3999dac89122 100644 --- a/orc8r/cloud/go/services/magmad/gateway_api.go +++ b/orc8r/cloud/go/services/magmad/gateway_api.go @@ -6,6 +6,7 @@ This source code is licensed under the BSD-style license found in the LICENSE file in the root 
directory of this source tree. */ +// Package magmad provides functions for taking actions at connected gateways. package magmad import ( @@ -21,6 +22,8 @@ import ( "golang.org/x/net/context" ) +// getGWMagmadClient gets a GRPC client to the magmad service running on the gateway specified by (network ID, gateway ID). +// If gateway not found by configurator, returns ErrNotFound from magma/orc8r/lib/go/errors. func getGWMagmadClient(networkID string, gatewayID string) (protos.MagmadClient, context.Context, error) { hwID, err := configurator.GetPhysicalIDOfEntity(networkID, orc8r.MagmadGatewayType, gatewayID) if err != nil { @@ -35,6 +38,8 @@ func getGWMagmadClient(networkID string, gatewayID string) (protos.MagmadClient, return protos.NewMagmadClient(conn), ctx, nil } +// GatewayReboot reboots a gateway. +// If gateway not registered, returns ErrNotFound from magma/orc8r/lib/go/errors. func GatewayReboot(networkId string, gatewayId string) error { client, ctx, err := getGWMagmadClient(networkId, gatewayId) if err != nil { @@ -44,6 +49,8 @@ func GatewayReboot(networkId string, gatewayId string) error { return err } +// GatewayRestartServices restarts services at a gateway. +// If gateway not registered, returns ErrNotFound from magma/orc8r/lib/go/errors. func GatewayRestartServices(networkId string, gatewayId string, services []string) error { client, ctx, err := getGWMagmadClient(networkId, gatewayId) if err != nil { @@ -53,6 +60,8 @@ func GatewayRestartServices(networkId string, gatewayId string, services []strin return err } +// GatewayPing sends pings from a gateway to a set of hosts. +// If gateway not registered, returns ErrNotFound from magma/orc8r/lib/go/errors. 
func GatewayPing(networkId string, gatewayId string, packets int32, hosts []string) (*protos.NetworkTestResponse, error) { client, ctx, err := getGWMagmadClient(networkId, gatewayId) if err != nil { @@ -66,6 +75,8 @@ func GatewayPing(networkId string, gatewayId string, packets int32, hosts []stri return client.RunNetworkTests(ctx, &protos.NetworkTestRequest{Pings: pingParams}) } +// GatewayGenericCommand runs a generic command at a gateway. +// If gateway not registered, returns ErrNotFound from magma/orc8r/lib/go/errors. func GatewayGenericCommand(networkId string, gatewayId string, params *protos.GenericCommandParams) (*protos.GenericCommandResponse, error) { client, ctx, err := getGWMagmadClient(networkId, gatewayId) if err != nil { @@ -75,6 +86,8 @@ func GatewayGenericCommand(networkId string, gatewayId string, params *protos.Ge return client.GenericCommand(ctx, params) } +// TailGatewayLogs +// If gateway not registered, returns ErrNotFound from magma/orc8r/lib/go/errors. func TailGatewayLogs(networkId string, gatewayId string, service string) (protos.Magmad_TailLogsClient, error) { client, ctx, err := getGWMagmadClient(networkId, gatewayId) if err != nil { diff --git a/orc8r/cloud/go/services/magmad/obsidian/handlers/gateway_handlers.go b/orc8r/cloud/go/services/magmad/obsidian/handlers/gateway_handlers.go index 095a29dfe528..c73cdd62513e 100644 --- a/orc8r/cloud/go/services/magmad/obsidian/handlers/gateway_handlers.go +++ b/orc8r/cloud/go/services/magmad/obsidian/handlers/gateway_handlers.go @@ -12,12 +12,12 @@ import ( "io" "net/http" - "magma/orc8r/cloud/go/datastore" models2 "magma/orc8r/cloud/go/models" "magma/orc8r/cloud/go/obsidian" "magma/orc8r/cloud/go/pluginimpl/handlers" "magma/orc8r/cloud/go/services/magmad" - magmad_models "magma/orc8r/cloud/go/services/magmad/obsidian/models" + magmadModels "magma/orc8r/cloud/go/services/magmad/obsidian/models" + merrors "magma/orc8r/lib/go/errors" "magma/orc8r/lib/go/protos" "github.com/labstack/echo" @@ -40,7 
+40,7 @@ func rebootGateway(c echo.Context) error { err := magmad.GatewayReboot(networkID, gatewayID) if err != nil { - if datastore.IsErrNotFound(err) { + if err == merrors.ErrNotFound { return obsidian.HttpError(err, http.StatusNotFound) } return obsidian.HttpError(err, http.StatusInternalServerError) @@ -62,7 +62,7 @@ func restartServices(c echo.Context) error { } err = magmad.GatewayRestartServices(networkID, gatewayID, services) if err != nil { - if datastore.IsErrNotFound(err) { + if err == merrors.ErrNotFound { return obsidian.HttpError(err, http.StatusNotFound) } return obsidian.HttpError(err, http.StatusInternalServerError) @@ -77,18 +77,18 @@ func gatewayPing(c echo.Context) error { return nerr } - pingRequest := magmad_models.PingRequest{} + pingRequest := magmadModels.PingRequest{} err := c.Bind(&pingRequest) response, err := magmad.GatewayPing(networkID, gatewayID, pingRequest.Packets, pingRequest.Hosts) if err != nil { - if datastore.IsErrNotFound(err) { + if err == merrors.ErrNotFound { return obsidian.HttpError(err, http.StatusNotFound) } return obsidian.HttpError(err, http.StatusInternalServerError) } - var pingResponse magmad_models.PingResponse + var pingResponse magmadModels.PingResponse for _, ping := range response.Pings { - pingResult := &magmad_models.PingResult{ + pingResult := &magmadModels.PingResult{ HostOrIP: &ping.HostOrIp, NumPackets: &ping.NumPackets, PacketsTransmitted: ping.PacketsTransmitted, @@ -107,7 +107,7 @@ func gatewayGenericCommand(c echo.Context) error { return nerr } - request := magmad_models.GenericCommandParams{} + request := magmadModels.GenericCommandParams{} err := c.Bind(&request) if err != nil { return obsidian.HttpError(err, http.StatusBadRequest) @@ -127,7 +127,7 @@ func gatewayGenericCommand(c echo.Context) error { } resp, err := models2.ProtobufStructToJSONMap(response.Response) - genericCommandResponse := magmad_models.GenericCommandResponse{ + genericCommandResponse := magmadModels.GenericCommandResponse{ 
Response: resp, } return c.JSON(http.StatusOK, genericCommandResponse) @@ -139,7 +139,7 @@ func tailGatewayLogs(c echo.Context) error { return nerr } - request := magmad_models.TailLogsRequest{} + request := magmadModels.TailLogsRequest{} err := c.Bind(&request) if err != nil { return obsidian.HttpError(err, http.StatusBadRequest) diff --git a/orc8r/cloud/go/services/state/client_api.go b/orc8r/cloud/go/services/state/client_api.go index 36c852038099..64db34788c9b 100644 --- a/orc8r/cloud/go/services/state/client_api.go +++ b/orc8r/cloud/go/services/state/client_api.go @@ -11,7 +11,6 @@ package state import ( "context" "encoding/json" - "sync" "magma/orc8r/cloud/go/orc8r" "magma/orc8r/cloud/go/pluginimpl/models" @@ -22,19 +21,25 @@ import ( "github.com/golang/glog" "github.com/thoas/go-funk" - "google.golang.org/grpc" ) -// State includes reported operational state and additional info about the reporter +// State includes reported operational state and additional info about the reporter. +// Internally, we store a piece of state with primary key triplet {network ID, reporter ID, type}. type State struct { - // ID of the entity reporting the state (hwID, cert serial number, etc) + // ReporterID is the ID of the entity reporting the state (hwID, cert serial number, etc). ReporterID string - // TimeMs received in millisecond + // Type determines how the value is deserialized and validated on the cloud service side. + Type string + + // ReportedState is the actual state reported by the device. + ReportedState interface{} + // Version is the reported version of the state. + Version uint64 + + // TimeMs is the time the state was received in milliseconds. TimeMs uint64 - // Cert expiration TimeMs + // CertExpirationTime is the expiration time in milliseconds. 
CertExpirationTime int64 - ReportedState interface{} - Version uint64 } // SerializedStateWithMeta includes reported operational states and additional info @@ -52,27 +57,14 @@ type StateID struct { DeviceID string } -// Global clientconn that can be reused for this service -var connSingleton = (*grpc.ClientConn)(nil) -var connGuard = sync.Mutex{} - func GetStateClient() (protos.StateServiceClient, error) { - if connSingleton == nil { - // Reading the conn optimistically to avoid unnecessary overhead - connGuard.Lock() - if connSingleton == nil { - conn, err := registry.GetConnection(ServiceName) - if err != nil { - initErr := errors.NewInitError(err, ServiceName) - glog.Error(initErr) - connGuard.Unlock() - return nil, initErr - } - connSingleton = conn - } - connGuard.Unlock() + conn, err := registry.GetConnection(ServiceName) + if err != nil { + initErr := errors.NewInitError(err, ServiceName) + glog.Error(initErr) + return nil, initErr } - return protos.NewStateServiceClient(connSingleton), nil + return protos.NewStateServiceClient(conn), nil } // GetState returns the state specified by the networkID, typeVal, and hwID @@ -123,6 +115,31 @@ func GetStates(networkID string, stateIDs []StateID) (map[StateID]State, error) if err != nil { return nil, err } + return makeStatesByID(res) +} + +// SearchStates returns all states matching the filter arguments. +// typeFilter and keyFilter are both OR clauses, and the final predicate +// applied to the search will be the AND of both filters. 
+// e.g.: ["t1", "t2"], ["k1", "k2"] => (t1 OR t2) AND (k1 OR k2) +func SearchStates(networkID string, typeFilter []string, keyFilter []string) (map[StateID]State, error) { + client, err := GetStateClient() + if err != nil { + return nil, err + } + + res, err := client.GetStates(context.Background(), &protos.GetStatesRequest{ + NetworkID: networkID, + TypeFilter: typeFilter, + IdFilter: keyFilter, + }) + if err != nil { + return nil, err + } + return makeStatesByID(res) +} + +func makeStatesByID(res *protos.GetStatesResponse) (map[StateID]State, error) { idToValue := map[StateID]State{} for _, pState := range res.States { stateID := StateID{Type: pState.Type, DeviceID: pState.DeviceID} @@ -189,7 +206,7 @@ func fillInGatewayStatusState(state State) *models.GatewayStatus { } func toProtosStateIDs(stateIDs []StateID) []*protos.StateID { - ids := []*protos.StateID{} + var ids []*protos.StateID for _, state := range stateIDs { ids = append(ids, &protos.StateID{Type: state.Type, DeviceID: state.DeviceID}) } @@ -208,6 +225,7 @@ func toState(pState *protos.State) (State, error) { TimeMs: serialized.TimeMs, CertExpirationTime: serialized.CertExpirationTime, ReportedState: iReportedState, + Type: pState.Type, Version: pState.Version, } return state, err diff --git a/orc8r/cloud/go/services/state/client_test.go b/orc8r/cloud/go/services/state/client_api_test.go similarity index 100% rename from orc8r/cloud/go/services/state/client_test.go rename to orc8r/cloud/go/services/state/client_api_test.go diff --git a/orc8r/cloud/go/services/state/indexer/indexer.go b/orc8r/cloud/go/services/state/indexer/indexer.go index c24cd1e986e2..5ea801cded38 100644 --- a/orc8r/cloud/go/services/state/indexer/indexer.go +++ b/orc8r/cloud/go/services/state/indexer/indexer.go @@ -13,17 +13,21 @@ import ( "magma/orc8r/cloud/go/storage" ) +// StateErrors is a mapping of state type+key to error experienced indexing the state. +// Type is state type, key is state reporter ID. 
+type StateErrors map[storage.TypeAndKey]error + // Indexer creates a set of secondary indices for consumption by a service. // Each Indexer should // - be owned by a single service -// - have a unique ID, alphanumeric with underscores, prefixed by the owning service, -// e.g. directoryd_session_id // - store per-version data in a properly isolated manner, // e.g. different SQL tables for different indexer versions // - have its generated data exposed by the owning service, // i.e. only one other service should access the generated data directly via the storage interface. type Indexer interface { // GetID returns the unique identifier for the indexer. + // Unique ID should be alphanumeric with underscores, prefixed by the owning service, + // e.g. directoryd_sessionid. GetID() string // GetVersion returns the current version for the indexer. @@ -37,23 +41,17 @@ type Indexer interface { // Each version should use e.g. a separate SQL table, so preparing for // a reindex would include creating new table(s). // isFirstReindex is set if this is the first time this indexer has been registered. - PrepareReindex(from, to uint64, isFirstReindex bool) + PrepareReindex(from, to uint64, isFirstReindex bool) error // CompleteReindex indicates the reindex operation is complete. // Any internal state relevant only to the from version can subsequently be // safely removed, e.g. dropping old SQL tables. - CompleteReindex(from, to uint64) + CompleteReindex(from, to uint64) error // Index updates secondary indices based on the added/updated states. - Index(reporterHWID string, states []state.State) (map[storage.TypeAndKey]error, error) + Index(networkID, reporterHWID string, states []state.State) (StateErrors, error) // IndexRemove updates secondary indices based on the removed states. // NOTE: for now, we will defer IndexRemove to future efforts. //IndexRemove(reporterHWID string, states []State) (map[TypeAndKey]error, error) } - -// Subscription denotes a set of primary keys. 
-type Subscription struct { - Type string - Matcher KeyMatcher -} diff --git a/orc8r/cloud/go/services/state/indexer/mocks/Indexer.go b/orc8r/cloud/go/services/state/indexer/mocks/Indexer.go index e83700f85e5d..1528001ba077 100644 --- a/orc8r/cloud/go/services/state/indexer/mocks/Indexer.go +++ b/orc8r/cloud/go/services/state/indexer/mocks/Indexer.go @@ -5,7 +5,6 @@ package mocks import indexer "magma/orc8r/cloud/go/services/state/indexer" import mock "github.com/stretchr/testify/mock" import state "magma/orc8r/cloud/go/services/state" -import storage "magma/orc8r/cloud/go/storage" // Indexer is an autogenerated mock type for the Indexer type type Indexer struct { @@ -13,8 +12,17 @@ type Indexer struct { } // CompleteReindex provides a mock function with given fields: from, to -func (_m *Indexer) CompleteReindex(from uint64, to uint64) { - _m.Called(from, to) +func (_m *Indexer) CompleteReindex(from uint64, to uint64) error { + ret := _m.Called(from, to) + + var r0 error + if rf, ok := ret.Get(0).(func(uint64, uint64) error); ok { + r0 = rf(from, to) + } else { + r0 = ret.Error(0) + } + + return r0 } // GetID provides a mock function with given fields: @@ -61,22 +69,22 @@ func (_m *Indexer) GetVersion() uint64 { return r0 } -// Index provides a mock function with given fields: reporterHWID, states -func (_m *Indexer) Index(reporterHWID string, states []state.State) (map[storage.TypeAndKey]error, error) { - ret := _m.Called(reporterHWID, states) +// Index provides a mock function with given fields: networkID, reporterHWID, states +func (_m *Indexer) Index(networkID string, reporterHWID string, states []state.State) (indexer.StateErrors, error) { + ret := _m.Called(networkID, reporterHWID, states) - var r0 map[storage.TypeAndKey]error - if rf, ok := ret.Get(0).(func(string, []state.State) map[storage.TypeAndKey]error); ok { - r0 = rf(reporterHWID, states) + var r0 indexer.StateErrors + if rf, ok := ret.Get(0).(func(string, string, []state.State) indexer.StateErrors); 
ok { + r0 = rf(networkID, reporterHWID, states) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(map[storage.TypeAndKey]error) + r0 = ret.Get(0).(indexer.StateErrors) } } var r1 error - if rf, ok := ret.Get(1).(func(string, []state.State) error); ok { - r1 = rf(reporterHWID, states) + if rf, ok := ret.Get(1).(func(string, string, []state.State) error); ok { + r1 = rf(networkID, reporterHWID, states) } else { r1 = ret.Error(1) } @@ -85,6 +93,15 @@ func (_m *Indexer) Index(reporterHWID string, states []state.State) (map[storage } // PrepareReindex provides a mock function with given fields: from, to, isFirstReindex -func (_m *Indexer) PrepareReindex(from uint64, to uint64, isFirstReindex bool) { - _m.Called(from, to, isFirstReindex) +func (_m *Indexer) PrepareReindex(from uint64, to uint64, isFirstReindex bool) error { + ret := _m.Called(from, to, isFirstReindex) + + var r0 error + if rf, ok := ret.Get(0).(func(uint64, uint64, bool) error); ok { + r0 = rf(from, to, isFirstReindex) + } else { + r0 = ret.Error(0) + } + + return r0 } diff --git a/orc8r/cloud/go/services/state/indexer/matcher.go b/orc8r/cloud/go/services/state/indexer/subscription.go similarity index 69% rename from orc8r/cloud/go/services/state/indexer/matcher.go rename to orc8r/cloud/go/services/state/indexer/subscription.go index 3c5e229630b5..00d7212d6aa4 100644 --- a/orc8r/cloud/go/services/state/indexer/matcher.go +++ b/orc8r/cloud/go/services/state/indexer/subscription.go @@ -8,19 +8,36 @@ package indexer -import "strings" +import ( + "strings" + + "magma/orc8r/cloud/go/services/state" +) + +// Subscription denotes a set of primary keys. +type Subscription struct { + Type string + KeyMatcher KeyMatcher +} // KeyMatcher indicates whether a particular key matches some pattern. 
type KeyMatcher interface { Match(s string) bool } +func (s *Subscription) Match(st state.State) bool { + if typeMatch := s.Type == st.Type; !typeMatch { + return false + } + return s.KeyMatcher.Match(st.ReporterID) +} + type matchAll struct{} type matchExact struct{ exact string } type matchPrefix struct{ prefix string } -// NewMatchAll returns a new KeyMatcher that matches all keys. -func NewMatchAll() KeyMatcher { return &matchAll{} } +// MatchAll is a singleton key matcher for matching all keys. +var MatchAll KeyMatcher = &matchAll{} // NewMatchExact returns a new KeyMatcher that matches keys exactly matching exact. func NewMatchExact(exact string) KeyMatcher { return &matchExact{exact: exact} } diff --git a/orc8r/cloud/go/services/state/indexer/matcher_test.go b/orc8r/cloud/go/services/state/indexer/subscription_test.go similarity index 58% rename from orc8r/cloud/go/services/state/indexer/matcher_test.go rename to orc8r/cloud/go/services/state/indexer/subscription_test.go index b3c6f0c3a668..803e48c85a33 100644 --- a/orc8r/cloud/go/services/state/indexer/matcher_test.go +++ b/orc8r/cloud/go/services/state/indexer/subscription_test.go @@ -10,14 +10,45 @@ package indexer_test import ( "github.com/stretchr/testify/assert" + "magma/orc8r/cloud/go/services/state" "magma/orc8r/cloud/go/services/state/indexer" "testing" ) +const ( + imsi0 = "some_imsi_0" + imsi1 = "some_imsi_1" + type0 = "some_type_0" + type1 = "some_type_1" +) + var emptyMatch []string +func TestSubscription_Match(t *testing.T) { + sub := indexer.Subscription{Type: type0} + + state00 := state.State{ReporterID: imsi0, Type: type0} + state10 := state.State{ReporterID: imsi1, Type: type0} + state01 := state.State{ReporterID: imsi0, Type: type1} + state11 := state.State{ReporterID: imsi1, Type: type1} + + // Match all + sub.KeyMatcher = indexer.MatchAll + assert.True(t, sub.Match(state00)) + assert.True(t, sub.Match(state10)) + assert.False(t, sub.Match(state01)) + assert.False(t, sub.Match(state11)) + 
+ // Match exact + sub.KeyMatcher = indexer.NewMatchExact(imsi0) + assert.True(t, sub.Match(state00)) + assert.False(t, sub.Match(state10)) + assert.False(t, sub.Match(state01)) + assert.False(t, sub.Match(state11)) +} + func TestMatchAll_Match(t *testing.T) { - m := indexer.NewMatchAll() + m := indexer.MatchAll yesMatch := []string{"", "f", "fo", "foo", "foobar", "foobarbaz", "foobar !@#$%^&*()_+"} testMatchImpl(t, m, yesMatch, emptyMatch) } diff --git a/orc8r/cloud/go/services/state/state/main.go b/orc8r/cloud/go/services/state/state/main.go index 1754dd1f5771..022e71967d0f 100644 --- a/orc8r/cloud/go/services/state/state/main.go +++ b/orc8r/cloud/go/services/state/state/main.go @@ -12,7 +12,6 @@ import ( "time" "magma/orc8r/cloud/go/blobstore" - "magma/orc8r/cloud/go/datastore" "magma/orc8r/cloud/go/orc8r" "magma/orc8r/cloud/go/service" "magma/orc8r/cloud/go/services/state" @@ -32,7 +31,7 @@ func main() { if err != nil { glog.Fatalf("Error creating state service %s", err) } - db, err := sqorc.Open(datastore.SQL_DRIVER, datastore.DATABASE_SOURCE) + db, err := sqorc.Open(blobstore.SQLDriver, blobstore.DatabaseSource) if err != nil { glog.Fatalf("Failed to connect to database: %s", err) } diff --git a/orc8r/cloud/go/services/state/test_utils/utils.go b/orc8r/cloud/go/services/state/test_utils/utils.go index 15c82d2a2d85..929cc77928eb 100644 --- a/orc8r/cloud/go/services/state/test_utils/utils.go +++ b/orc8r/cloud/go/services/state/test_utils/utils.go @@ -44,6 +44,23 @@ func ReportGatewayStatus(t *testing.T, ctx context.Context, req *models.GatewayS assert.NoError(t, err) } +func ReportState(t *testing.T, ctx context.Context, stateType string, stateKey string, req interface{}) { + client, err := state.GetStateClient() + assert.NoError(t, err) + serializedState, err := serde.Serialize(state.SerdeDomain, stateType, req) + assert.NoError(t, err) + states := []*protos.State{ + { + Type: stateType, + DeviceID: stateKey, + Value: serializedState, + }, + } + res, err := 
client.ReportStates(ctx, &protos.ReportStatesRequest{States: states}) + assert.NoError(t, err) + assert.Empty(t, res.UnreportedStates) +} + func GetContextWithCertificate(t *testing.T, hwID string) context.Context { csn := test_utils.StartMockGwAccessControl(t, []string{hwID}) return metadata.NewOutgoingContext( diff --git a/orc8r/cloud/go/services/tenants/tenants/main.go b/orc8r/cloud/go/services/tenants/tenants/main.go index 00e68f8e5174..073de8ea243f 100644 --- a/orc8r/cloud/go/services/tenants/tenants/main.go +++ b/orc8r/cloud/go/services/tenants/tenants/main.go @@ -10,7 +10,6 @@ package main import ( "magma/orc8r/cloud/go/blobstore" - "magma/orc8r/cloud/go/datastore" "magma/orc8r/cloud/go/orc8r" "magma/orc8r/cloud/go/service" "magma/orc8r/cloud/go/services/tenants" @@ -27,7 +26,7 @@ func main() { if err != nil { glog.Fatalf("Error creating state service %s", err) } - db, err := sqorc.Open(datastore.SQL_DRIVER, datastore.DATABASE_SOURCE) + db, err := sqorc.Open(blobstore.SQLDriver, blobstore.DatabaseSource) if err != nil { glog.Fatalf("Failed to connect to database: %s", err) } diff --git a/orc8r/cloud/go/test_utils/datastore_test_utils.go b/orc8r/cloud/go/test_utils/datastore_test_utils.go deleted file mode 100644 index 8018567242ce..000000000000 --- a/orc8r/cloud/go/test_utils/datastore_test_utils.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
-*/ - -package test_utils - -import ( - "fmt" - "testing" - - "magma/orc8r/cloud/go/datastore" - "magma/orc8r/lib/go/protos" - - "github.com/golang/protobuf/proto" - "github.com/stretchr/testify/assert" -) - -/* - Assert the datastore has some expected rows -*/ -func AssertDatastoreHasRows( - t *testing.T, - store datastore.Api, - tableKey string, - expectedRows map[string]interface{}, - deserializer func([]byte) (interface{}, error), -) { - marshaledValueWrappers, err := store.GetMany(tableKey, getMapKeys(expectedRows)) - assert.NoError(t, err) - assert.Equal( - t, len(expectedRows), len(marshaledValueWrappers), - "Expected %d rows in datastore, actual %d", len(expectedRows), len(marshaledValueWrappers), - ) - - for k, v := range marshaledValueWrappers { - unmarshaledVal, err := deserializer(v.Value) - assert.NoError(t, err) - - expectedVal, ok := expectedRows[k] - assert.True(t, ok) - valMsg, ok := unmarshaledVal.(proto.Message) - if ok { - expectedMsg, ok := expectedVal.(proto.Message) - assert.True(t, ok) - assert.Equal(t, protos.TestMarshal(expectedMsg), protos.TestMarshal(valMsg)) - } else { - assert.Equal(t, expectedVal, unmarshaledVal) - } - - } -} - -func getMapKeys(in map[string]interface{}) []string { - ret := make([]string, 0, len(in)) - for k := range in { - ret = append(ret, k) - } - return ret -} - -/* - Assert that the datastore does not have an entry for a specific key. 
-*/ -func AssertDatastoreDoesNotHaveRow( - t *testing.T, - store datastore.Api, - tableKey string, - rowKey string, -) { - allKeys, err := store.ListKeys(tableKey) - assert.NoError(t, err) - for _, k := range allKeys { - if k == rowKey { - assert.Fail( - t, - fmt.Sprintf("Found table key %s which is not supposed to exist", rowKey)) - } - } -} - -/* - Insert some test fixture data into the datastore -*/ -func SetupTestFixtures( - t *testing.T, - store datastore.Api, - tableKey string, - fixtures map[string]interface{}, - serializer func(interface{}) ([]byte, error), -) { - for k, val := range fixtures { - marshaledVal, err := serializer(val) - assert.NoError(t, err) - err = store.Put(tableKey, k, marshaledVal) - assert.NoError(t, err) - } -} diff --git a/orc8r/cloud/go/test_utils/mock_datastore.go b/orc8r/cloud/go/test_utils/mock_datastore.go deleted file mode 100644 index ba7c4f82a376..000000000000 --- a/orc8r/cloud/go/test_utils/mock_datastore.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright (c) Facebook, Inc. and its affiliates. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. 
-*/ - -package test_utils - -import ( - "sync" - - "magma/orc8r/cloud/go/datastore" -) - -type mockDatastoreTable map[string][]byte - -// Datastore backed by a golang map -type MockDatastore struct { - store map[string]mockDatastoreTable -} - -var instance *MockDatastore -var once sync.Once - -// Get a singleton mock datastore instance for tests that require multiple -// test services -func GetMockDatastoreInstance() *MockDatastore { - once.Do(func() { - instance = NewMockDatastore() - }) - return instance -} - -func NewMockDatastore() *MockDatastore { - ds := new(MockDatastore) - ds.store = make(map[string]mockDatastoreTable, 0) - return ds -} - -func (m *MockDatastore) initTable(table string) { - if _, ok := m.store[table]; !ok { - m.store[table] = make(map[string][]byte, 0) - } -} - -func (m *MockDatastore) Put(table string, key string, value []byte) error { - m.initTable(table) - m.store[table][key] = value - return nil -} - -func (m *MockDatastore) PutMany(table string, valuesToPut map[string][]byte) (map[string]error, error) { - m.initTable(table) - for k, v := range valuesToPut { - m.store[table][k] = v - } - return map[string]error{}, nil -} - -func (m *MockDatastore) Get(table string, key string) ([]byte, uint64, error) { - m.initTable(table) - value, ok := m.store[table][key] - if ok { - return value, 0, nil - } - return nil, 0, datastore.ErrNotFound -} - -func (m *MockDatastore) GetMany(table string, keys []string) (map[string]datastore.ValueWrapper, error) { - m.initTable(table) - ret := make(map[string]datastore.ValueWrapper, len(keys)) - for _, k := range keys { - val, ok := m.store[table][k] - if ok { - ret[k] = datastore.ValueWrapper{ - Value: val, - Generation: 0, - } - } - } - return ret, nil -} - -func (m *MockDatastore) Delete(table string, key string) error { - m.initTable(table) - - delete(m.store[table], key) - return nil -} - -func (m *MockDatastore) DeleteMany(table string, keys []string) (map[string]error, error) { - m.initTable(table) - 
for _, k := range keys { - delete(m.store[table], k) - } - return map[string]error{}, nil -} - -func (m *MockDatastore) ListKeys(table string) ([]string, error) { - m.initTable(table) - keys := make([]string, 0, len(m.store[table])) - for key := range m.store[table] { - keys = append(keys, key) - } - return keys, nil -} - -func (m *MockDatastore) DeleteTable(table string) error { - m.initTable(table) - delete(m.store, table) - return nil -} - -func (m *MockDatastore) DoesKeyExist(table string, key string) (bool, error) { - m.initTable(table) - _, ok := m.store[table][key] - if ok { - return true, nil - } else { - return false, nil - } -} diff --git a/orc8r/cloud/go/test_utils/service.go b/orc8r/cloud/go/test_utils/service.go index 141fc9e5f0ba..88ef314c8413 100644 --- a/orc8r/cloud/go/test_utils/service.go +++ b/orc8r/cloud/go/test_utils/service.go @@ -16,9 +16,8 @@ import ( "magma/orc8r/lib/go/registry" ) -// Creates & Initializes test magma service on a dynamically selected available -// local port -// Returns the newly created service and net.Listener, it was registered with +// Creates & Initializes test magma service on a dynamically selected available local port. +// Returns the newly created service and net.Listener, it was registered with. 
func NewTestService(t *testing.T, moduleName string, serviceType string) (*service.Service, net.Listener) { // Create the server socket for gRPC lis, err := net.Listen("tcp", "") @@ -30,7 +29,7 @@ func NewTestService(t *testing.T, moduleName string, serviceType string) (*servi if err != nil { t.Fatalf("failed to resolve TCP address: %s", err) } - registry.AddService(registry.ServiceLocation{Name: string(serviceType), Host: "localhost", Port: addr.Port}) + registry.AddService(registry.ServiceLocation{Name: serviceType, Host: "localhost", Port: addr.Port}) // Create the service srv, err := service.NewTestOrchestratorService(t, moduleName, serviceType) diff --git a/orc8r/cloud/go/test_utils/test_db.go b/orc8r/cloud/go/test_utils/test_db.go new file mode 100644 index 000000000000..8f04b12b2212 --- /dev/null +++ b/orc8r/cloud/go/test_utils/test_db.go @@ -0,0 +1,55 @@ +/* + Copyright (c) Facebook, Inc. and its affiliates. + All rights reserved. + + This source code is licensed under the BSD-style license found in the + LICENSE file in the root directory of this source tree. +*/ + +package test_utils + +import ( + "database/sql" + "fmt" + "sync" + "testing" + + "magma/orc8r/cloud/go/blobstore" + "magma/orc8r/cloud/go/sqorc" + + _ "github.com/mattn/go-sqlite3" + "github.com/stretchr/testify/assert" +) + +var ( + once sync.Once + instance *sql.DB +) + +// GetSharedTestDB returns a singleton in-memory database connection. +func GetSharedTestDB(t *testing.T) *sql.DB { + once.Do(func() { + db, err := sqorc.Open(blobstore.SQLDriver, ":memory:") + assert.NoError(t, err) + instance = db + }) + return instance +} + +// DropTableFromSharedTestDB drops the table from the singleton in-memory database. +func DropTableFromSharedTestDB(t *testing.T, table string) { + query := fmt.Sprintf("DROP TABLE IF EXISTS %s", table) + _, err := instance.Exec(query) + assert.NoError(t, err) +} + +// NewEntStorage returns a new blobstore storage factory utilizing the singleton in-memory database. 
+func NewEntStorage(t *testing.T, tableName string) blobstore.BlobStorageFactory { + db := GetSharedTestDB(t) + store := blobstore.NewEntStorage(tableName, db, sqorc.GetSqlBuilder()) + + err := store.InitializeFactory() + assert.NoError(t, err) + + return store +} diff --git a/orc8r/gateway/c/common/CMakeLists.txt b/orc8r/gateway/c/common/CMakeLists.txt index 7898dc56bd25..bc43a4ace1db 100644 --- a/orc8r/gateway/c/common/CMakeLists.txt +++ b/orc8r/gateway/c/common/CMakeLists.txt @@ -14,7 +14,6 @@ include(CMakeProtoMacros.txt) ADD_SUBDIRECTORY(service303) ADD_SUBDIRECTORY(service_registry) ADD_SUBDIRECTORY(async_grpc) -ADD_SUBDIRECTORY(scribe_client) ADD_SUBDIRECTORY(config) ADD_SUBDIRECTORY(datastore) ADD_SUBDIRECTORY(policydb) diff --git a/orc8r/gateway/c/common/scribe_client/CMakeLists.txt b/orc8r/gateway/c/common/scribe_client/CMakeLists.txt deleted file mode 100644 index 28cba74f19b9..000000000000 --- a/orc8r/gateway/c/common/scribe_client/CMakeLists.txt +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) 2016-present, Facebook, Inc. -# All rights reserved. -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. An additional grant -# of patent rights can be found in the PATENTS file in the same directory. 
- -add_compile_options(-std=c++11) -include_directories(${CMAKE_CURRENT_BINARY_DIR}) -#compile the relevant protos -create_proto_dir("orc8r" CPP_OUT_DIR) - -list(APPEND PROTO_SRCS "") -list(APPEND PROTO_HDRS "") - -set(SCRIBE_PROTOS common logging_service) -generate_cpp_protos("${SCRIBE_PROTOS}" "${PROTO_SRCS}" "${PROTO_HDRS}" - ${ORC8R_PROTO_DIR} ${CPP_OUT_DIR}) - -set(SCRIBE_GRPC_PROTOS logging_service) -generate_grpc_protos("${SCRIBE_GRPC_PROTOS}" "${PROTO_SRCS}" "${PROTO_HDRS}" - ${ORC8R_PROTO_DIR} ${CPP_OUT_DIR}) - -add_library(SCRIBE_CLIENT - ScribeClient.cpp - ScribeRpcClient.cpp - ${PROTO_SRCS} - ${PROTO_HDRS} -) - -target_link_libraries(SCRIBE_CLIENT SERVICE_REGISTRY ASYNC_GRPC) - -target_include_directories(SCRIBE_CLIENT PUBLIC - ${CMAKE_CURRENT_SOURCE_DIR} -) diff --git a/orc8r/gateway/c/common/scribe_client/ScribeClient.cpp b/orc8r/gateway/c/common/scribe_client/ScribeClient.cpp deleted file mode 100644 index b142cd64c49e..000000000000 --- a/orc8r/gateway/c/common/scribe_client/ScribeClient.cpp +++ /dev/null @@ -1,146 +0,0 @@ -/** - * Copyright (c) 2016-present, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. 
- */ - -#include -#include -#include -#include - -#include - -#include "ScribeClient.h" -#include "ServiceRegistrySingleton.h" - - -using grpc::Channel; -using grpc::ClientContext; -using grpc::Status; -using magma::LoggingService; -using magma::LoggingServiceClient; -using magma::Void; -using magma::LogRequest; -using magma::LoggerDestination; -using magma::LogEntry; - -LoggingServiceClient::LoggingServiceClient() { - initializeClient(); -} - -LoggingServiceClient &LoggingServiceClient::get_instance() { - static LoggingServiceClient client_instance; - if (client_instance.stub_ == nullptr) { - client_instance.initializeClient(); - } - return client_instance; -} - -void LoggingServiceClient::initializeClient() { - auto channel = ServiceRegistrySingleton::Instance() - ->GetGrpcChannel("logger", ServiceRegistrySingleton::CLOUD); - // Create stub for LoggingService gRPC service - stub_ = LoggingService::NewStub(channel); - stub_ == nullptr; - std::cerr << "Unable to create LoggingServiceClient " << std::endl; - std::thread resp_loop_thread([&]() { rpc_response_loop(); }); - resp_loop_thread.detach(); -} - -bool LoggingServiceClient::shouldLog(float samplingRate) { - srand(time(0)); - return (rand() / (RAND_MAX)) < samplingRate; -} - -int LoggingServiceClient::log_to_scribe( - char const *category, - int time, - scribe_int_param_t *int_params, - int int_params_len, - scribe_string_param_t *str_params, - int str_params_len, - float sampling_rate, - std::function callback) { - LoggingServiceClient &client = get_instance(); - if (client.stub_ == nullptr || !client.shouldLog(sampling_rate)) return 0; - LogRequest request; - Void response; - LoggerDestination dest; - if (LoggerDestination_Parse("SCRIBE", &dest)) { - request.set_destination(dest); - } - LogEntry *entry = request.add_entries(); - entry->set_category(category); - entry->set_time(time); - auto strMap = entry->mutable_normal_map(); - for (int i = 0; i < str_params_len; ++i) { - const char *key = 
str_params[i].key; - const char *val = str_params[i].val; - (*strMap)[key] = val; - } - auto intMap = entry->mutable_int_map(); - for (int i = 0; i < int_params_len; ++i) { - const char *key = int_params[i].key; - int val = int_params[i].val; - (*intMap)[key] = val; - } - // Create a raw response pointer that stores a callback to be called when the - // gRPC call is answered - auto local_response = new AsyncLocalResponse( - std::move(callback), RESPONSE_TIMEOUT); - // Create a response reader for the `Log` RPC call. This reader - // stores the client context, the request to pass in, and the queue to add - // the response to when done - auto response_reader = client.stub_->AsyncLog( - local_response->get_context(), request, &client.queue_); - // Set the reader for the local response. This executes the `Log` - // response using the response reader. When it is done, the callback stored in - // `local_response` will be called - local_response->set_response_reader(std::move(response_reader)); - return 0; -} - -void LoggingServiceClient::log_to_scribe( - std::string category, - time_t time, - std::map int_params, - std::map str_params, - float sampling_rate, - std::function callback) { - LoggingServiceClient &client = get_instance(); - if (client.stub_ == nullptr || !client.shouldLog(sampling_rate)) return; - LogRequest request; - Void response; - LoggerDestination dest; - if (LoggerDestination_Parse("SCRIBE", &dest)) { - request.set_destination(dest); - } - LogEntry *entry = request.add_entries(); - entry->set_category(category); - entry->set_time(time); - auto strMap = entry->mutable_normal_map(); - for (const auto &pair : str_params) { - (*strMap)[pair.first] = pair.second; - } - auto intMap = entry->mutable_int_map(); - for (const auto &pair : int_params) { - (*intMap)[pair.first] = pair.second; - } - // Create a raw response pointer that stores a callback to be called when the - // gRPC call is answered - auto local_response = new AsyncLocalResponse( - 
std::move(callback), RESPONSE_TIMEOUT); - // Create a response reader for the `Log` RPC call. This reader - // stores the client context, the request to pass in, and the queue to add - // the response to when done - auto response_reader = client.stub_->AsyncLog( - local_response->get_context(), request, &client.queue_); - // Set the reader for the local response. This executes the `Log` - // response using the response reader. When it is done, the callback stored in - // `local_response` will be called - local_response->set_response_reader(std::move(response_reader)); -} diff --git a/orc8r/gateway/c/common/scribe_client/ScribeClient.h b/orc8r/gateway/c/common/scribe_client/ScribeClient.h deleted file mode 100644 index a40e313cbb48..000000000000 --- a/orc8r/gateway/c/common/scribe_client/ScribeClient.h +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Copyright (c) 2016-present, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. - */ - #pragma once - -#include - -#include - -#include "scribe_rpc_client.h" - -#include "GRPCReceiver.h" - -using grpc::Status; - -using grpc::Channel; -using grpc::ClientContext; -using grpc::Status; -using google::protobuf::RepeatedPtrField; -using magma::orc8r::LoggingService; -using magma::orc8r::LogRequest; -using magma::orc8r::LoggerDestination; - - -namespace magma { -using namespace orc8r; -/* - * gRPC client for LoggingService - */ -class LoggingServiceClient : public GRPCReceiver{ - public: - /** - * Log one scribe entry to the given category on scribe. API for C. - * - * @param category: category name of the scribe category to log to. - * @param time: a timestamp associated with the logentry. - * @param int_params[]: an array of scribe_int_param_t, where each - * scribe_int_param_t contains a str name, and a int value. 
- * @param int_params_len: length of the above array. - * @param str_params[]: an array of scribe_string_param_t, where each - scribe_string_param_t contains a str name, and a str value. - * @param str_params_len: length of the above array. - * @param sampling_rate: a float between 0 and 1 indicating the desired - * samplingRate of the log. The ScribeClient will throw a die with value in - * [0, 1) and drop the attempt to log the entry if the result of the die is - * larger than the samplingRate. - * @param callback: callback function is called when LogToScribe returns - */ - static int log_to_scribe( - char const *category, - int time, - scribe_int_param_t *int_params, - int int_params_len, - scribe_string_param_t *str_params, - int str_params_len, - float sampling_rate, - std::function callback); - - /** - * API for C++. Log on scribe entry to the given category on scribe. - * @param category category name of the scribe category to log to. - * @param time a timestamp associated with the logentry. - * @param int_params a map of int parameters with string keys - * @param str_params a map of string parameters with string keys - * @param sampling_rate a float between 0 and 1 indicating the desired - * sampling_rate of the log. The ScribeClient will throw a die with value in - * [0, 1) and drop the attempt to log the entry if the result of the die is - * larger than the sampling_rate. 
- * @param callback callback function is called when LogToScribe returns - */ - static void log_to_scribe( - std::string category, - time_t time, - std::map int_params, - std::map str_params, - float sampling_rate, - std::function callback); - - public: - LoggingServiceClient(LoggingServiceClient const&) = delete; - void operator=(LoggingServiceClient const&) = delete; - - private: - explicit LoggingServiceClient(); - static LoggingServiceClient& get_instance(); - std::shared_ptr stub_; - bool shouldLog(float samplingRate); - void initializeClient(); - static const uint32_t RESPONSE_TIMEOUT = 3; // seconds -}; - -} // namespace magma diff --git a/orc8r/gateway/c/common/scribe_client/ScribeRpcClient.cpp b/orc8r/gateway/c/common/scribe_client/ScribeRpcClient.cpp deleted file mode 100644 index 512547b78070..000000000000 --- a/orc8r/gateway/c/common/scribe_client/ScribeRpcClient.cpp +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Copyright (c) 2016-present, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. 
- */ -#include -#include - -#include "ScribeClient.h" -#include "scribe_rpc_client_for_cpp.h" -#include "ServiceRegistrySingleton.h" - -using magma::LoggingServiceClient; -using magma::ServiceRegistrySingleton; -using magma::LogEntry; - -static void log_to_scribe_done(const grpc::Status& status); - -int log_to_scribe( - char const *category, - scribe_int_param_t *int_params, - int int_params_len, - scribe_string_param_t *str_params, - int str_params_len) { - time_t t = time(NULL); - return log_to_scribe_with_time_and_sampling_rate(category, - t, - int_params, - int_params_len, - str_params, - str_params_len, - 1); -} - -int log_to_scribe_with_sampling_rate( - char const *category, - scribe_int_param_t *int_params, - int int_params_len, - scribe_string_param_t *str_params, - int str_params_len, - float sampling_rate) { - time_t t = time(NULL); - return log_to_scribe_with_time_and_sampling_rate(category, - t, - int_params, - int_params_len, - str_params, - str_params_len, - sampling_rate); -} - -int log_to_scribe_with_time_and_sampling_rate( - char const *category, - int time, - scribe_int_param_t *int_params, - int int_params_len, - scribe_string_param_t *str_params, - int str_params_len, - float sampling_rate) { - int status = LoggingServiceClient::log_to_scribe( - category, - time, - int_params, - int_params_len, - str_params, - str_params_len, - sampling_rate, - [](grpc::Status g_status, magma::Void response) { - log_to_scribe_done(g_status); - }); - return status; -} - -void log_to_scribe_with_time_and_sampling_rate( - std::string category, - time_t time, - std::map int_params, - std::map str_params, - float sampling_rate) { - return LoggingServiceClient::log_to_scribe( - category, - time, - int_params, - str_params, - sampling_rate, - [](grpc::Status status, magma::Void response) { - log_to_scribe_done(status); - }); -} - -void log_to_scribe( - std::string category, - std::map int_params, - std::map str_params) { - time_t t = time(NULL); - return 
log_to_scribe_with_time_and_sampling_rate( - category, t, int_params, str_params, 1); -} - -static void log_to_scribe_done(const grpc::Status& status) { - if (!status.ok()) { - std::cerr << "log_to_scribe fails with code " << status.error_code() - << ", msg: " << status.error_message() << std::endl; - } -} diff --git a/orc8r/gateway/c/common/scribe_client/scribe_rpc_client.h b/orc8r/gateway/c/common/scribe_client/scribe_rpc_client.h deleted file mode 100644 index ef045e712f03..000000000000 --- a/orc8r/gateway/c/common/scribe_client/scribe_rpc_client.h +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Copyright (c) 2016-present, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. - */ -#pragma once -#ifdef __cplusplus -extern "C" { -#endif - -// caller is responsible for string memory allocation and release. -typedef struct scribe_int_param { - const char *key; //string - const int val; // integer -} scribe_int_param_t; - -typedef struct scribe_string_param { - const char *key; - const char *val; -} scribe_string_param_t; - -/** - * Log one scribe entry to the given category on scribe. Default current - * timestamp and sampleRate 1 will be used. - * - * @param category: category name of the scribe category to log to. - * @param int_params[]: an array of scribe_int_param_t, where each - * scribe_int_param_t contains a str name, and a int value. - * @param int_params_len: length of the above array. - * @param str_params[]: an array of scribe_string_param_t, where each - scribe_string_param_t contains a str name, and a str value. - * @param str_params_len: length of the above array. 
- */ -int log_to_scribe( - char const *category, - scribe_int_param_t *int_params, - int int_params_len, - scribe_string_param_t *str_params, - int str_params_len); - -/** - * Log one scribe entry to the given category on scribe. Default current - * timestamp will be used. - * - * @param category: category name of the scribe category to log to. - * @param int_params[]: an array of scribe_int_param_t, where each - * scribe_int_param_t contains a str name, and a int value. - * @param int_params_len: length of the above array. - * @param str_params[]: an array of scribe_string_param_t, where each - scribe_string_param_t contains a str name, and a str value. - * @param str_params_len: length of the above array. - * @param sampling_rate: a float between 0 and 1 indicating the desired - * samplingRate of the log. The ScribeClient will throw a die with value in - * [0, 1) and drop the attempt to log the entry if the result of the die is - * larger than the samplingRate. - */ -int log_to_scribe_with_sampling_rate( - char const *category, - scribe_int_param_t *int_params, - int int_params_len, - scribe_string_param_t *str_params, - int str_params_len, - float sampling_rate); - -/** - * Log one scribe entry to the given category on scribe with a timestamp. - * - * @param category: category name of the scribe category to log to. - * @param time: a timestamp to associate the log message with. - * @param int_params[]: an array of scribe_int_param_t, where each - * scribe_int_param_t contains a str name, and a int value. - * @param int_params_len: length of the above array. - * @param str_params[]: an array of scribe_string_param_t, where each - scribe_string_param_t contains a str name, and a str value. - * @param str_params_len: length of the above array. - * @param sampling_rate: a float between 0 and 1 indicating the desired - * samplingRate of the log. 
The ScribeClient will throw a die with value in - * [0, 1) and drop the attempt to log the entry if the result of the die is - * larger than the samplingRate. - */ -int log_to_scribe_with_time_and_sampling_rate( - char const *category, - int time, - scribe_int_param_t *int_params, - int int_params_len, - scribe_string_param_t *str_params, - int str_params_len, - float sampling_rate); - -#ifdef __cplusplus -} -#endif diff --git a/orc8r/gateway/c/common/scribe_client/scribe_rpc_client_for_cpp.h b/orc8r/gateway/c/common/scribe_client/scribe_rpc_client_for_cpp.h deleted file mode 100644 index 05abd4f9e49f..000000000000 --- a/orc8r/gateway/c/common/scribe_client/scribe_rpc_client_for_cpp.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright (c) 2016-present, Facebook, Inc. - * All rights reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. An additional grant - * of patent rights can be found in the PATENTS file in the same directory. - */ - -#ifndef C_SCRIBE_RPC_CLIENT_FOR_CPP_H -#define C_SCRIBE_RPC_CLIENT_FOR_CPP_H - -/** - * C++ API for logging one scribe entry to the given category with a timestamp and a sampling rate . - * @param category category name of the scribe category to log to. - * @param time a timestamp to associate the log message with. - * @param int_params a map of string keys to int values to log - * @param str_params a map of string keys to string values to log - * @param sampling_rate a float between 0 and 1 indicating the desired - * sampling_rate of the log. The ScribeClient will throw a die with value in - * [0, 1) and drop the attempt to log the entry if the result of the die is - * larger than the sampling_rate. 
- */ -void log_to_scribe_with_time_and_sampling_rate( - std::string category, - time_t time, - std::map int_params, - std::map str_params, - float sampling_rate); - -/** - * C++ API for logging one scribe entry with default timestamp(current time), and sampling rate 1. - * @param category category name of the scribe category to log to. - * @param int_params a map of string keys to int values to log - * @param str_params a map of string keys to string values to log - */ -void log_to_scribe( - std::string category, - std::map int_params, - std::map str_params); - -#endif //C_SCRIBE_RPC_CLIENT_FOR_CPP_H diff --git a/orc8r/gateway/configs/magmad.yml b/orc8r/gateway/configs/magmad.yml index aadffe762dd2..5e4baa3ff39b 100644 --- a/orc8r/gateway/configs/magmad.yml +++ b/orc8r/gateway/configs/magmad.yml @@ -30,12 +30,9 @@ bootstrap_config: enable_config_streamer: True enable_upgrade_manager: False enable_network_monitor: False -enable_systemd_tailer: False enable_sync_rpc: True enable_kernel_version_checking: False -systemd_tailer_poll_interval: 30 # seconds - network_monitor_config: # How long to sleep between statistic collections sampling_period: 60 diff --git a/orc8r/gateway/go/services/sync_rpc/service/sync_rpc_client_test.go b/orc8r/gateway/go/services/sync_rpc/service/sync_rpc_client_test.go index 6878555f27f1..7b9a5888b9d8 100644 --- a/orc8r/gateway/go/services/sync_rpc/service/sync_rpc_client_test.go +++ b/orc8r/gateway/go/services/sync_rpc/service/sync_rpc_client_test.go @@ -39,9 +39,6 @@ func (svc *testSyncRpcService) EstablishSyncRPCStream(stream protos.SyncRPCServi func (svc *testSyncRpcService) SyncRPC(stream protos.SyncRPCService_SyncRPCServer) error { return nil } -func (svc *testSyncRpcService) GetHostnameForHwid(ctx context.Context, hwid *protos.HardwareID) (*protos.Hostname, error) { - return &protos.Hostname{}, nil -} // run instance of the test grpc service func runTestSyncRpcService(server *testSyncRpcService, grpcPortCh chan string) { diff --git 
a/orc8r/gateway/python/defs.mk b/orc8r/gateway/python/defs.mk index 7545867cf076..b6c67e74827e 100644 --- a/orc8r/gateway/python/defs.mk +++ b/orc8r/gateway/python/defs.mk @@ -18,7 +18,6 @@ TESTS=magma/common/redis/tests \ magma/magmad/check/machine_check/tests \ magma/magmad/check/network_check/tests \ magma/magmad/tests \ - magma/magmad/logging/tests \ magma/magmad/upgrade/tests \ magma/magmad/generic_command/tests \ magma/state/tests \ diff --git a/orc8r/gateway/python/magma/common/scribe_client.py b/orc8r/gateway/python/magma/common/scribe_client.py deleted file mode 100644 index ead09e44660a..000000000000 --- a/orc8r/gateway/python/magma/common/scribe_client.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -Copyright (c) 2016-present, Facebook, Inc. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. An additional grant -of patent rights can be found in the PATENTS file in the same directory. -""" -import asyncio -import logging - -import random -from orc8r.protos.logging_service_pb2 import LogRequest, LoggerDestination -from orc8r.protos.logging_service_pb2_grpc import LoggingServiceStub - -from magma.common.service_registry import ServiceRegistry - - -class ScribeClient(object): - """ - ScribeClient is the client_api to call Log() into logging_service on cloud. - User is responsible of formatting a list of LogEntry. - """ - def __init__(self, loop=None): - self._loop = loop if loop else asyncio.get_event_loop() - - @staticmethod - def should_log(sampling_rate): - return random.random() < sampling_rate - - def log_to_scribe_with_sampling_rate(self, entries, sampling_rate=1): - """ - Client API to log entries to scribe. 
- Args: - entries: a list of LogEntry, where each contains a - category(str), - a required timestamp(int), - an optional int_map(map), - an optional normal_map(map, - an optional tag_set(arr of str), - an optional normvector(arr of str), - and an optional hw_id(str). - to be sent. - sampling_rate: defaults to 1, and will be logged always if - it's 1. Otherwise, all entries of this - specific call will be logged or dropped - based on a coin flip. - - Returns: - n/a. If an exception has occurred, the error will be logged. - - """ - self.log_entries_to_dest(entries, - LoggerDestination.Value("SCRIBE"), - sampling_rate) - - def log_entries_to_dest(self, entries, destination, sampling_rate=1): - """ - Client API to log entries to destination. - Args: - entries: a list of LogEntry, where each contains a - category(str), - a required timestamp(int), - an optional int_map(map), - an optional normal_map(map, - an optional tag_set(arr of str), - an optional normvector(arr of str), - and an optional hw_id(str). - to be sent. - destination: the LoggerDestination to log to. Has to be - defined as a enum in LoggerDestination - in the proto file. - sampling_rate: defaults to 1, and will be logged always if - it's 1. Otherwise, all entries of this - specific call will be logged or dropped - based on a coin flip. - - Returns: - n/a. If an exception has occurred, the error will be logged. - """ - if not self.should_log(sampling_rate): - return - chan = ServiceRegistry.get_rpc_channel('logger', - ServiceRegistry.CLOUD) - client = LoggingServiceStub(chan) - log_request = LogRequest( - Entries=entries, - Destination=destination - ) - future = client.Log.future(log_request) - future.add_done_callback(lambda future: - self._loop.call_soon_threadsafe( - self.log_done, future)) - - def log_done(self, log_future): - """ - Log callback to handle exceptions - """ - err = log_future.exception() - if err: - logging.error("Log Error! 
[%s] %s", - err.code(), err.details()) diff --git a/orc8r/gateway/python/magma/common/tests/scribe_client_test.py b/orc8r/gateway/python/magma/common/tests/scribe_client_test.py deleted file mode 100644 index ba5e37935ef9..000000000000 --- a/orc8r/gateway/python/magma/common/tests/scribe_client_test.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -Copyright (c) 2016-present, Facebook, Inc. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. An additional grant -of patent rights can be found in the PATENTS file in the same directory. -""" -# pylint: disable=protected-access - -import asyncio -import unittest -import unittest.mock - -from orc8r.protos.logging_service_pb2 import LogEntry, LogRequest, \ - LoggerDestination - -from magma.common.scribe_client import ScribeClient -from magma.common.service_registry import ServiceRegistry - - -class ScribeClientTests(unittest.TestCase): - """ - Tests for the ScribeClient - """ - def setUp(self): - self._scribe_client = ScribeClient(loop=asyncio.new_event_loop()) - ServiceRegistry.add_service('test', '0.0.0.0', 0) - ServiceRegistry._PROXY_CONFIG = {'local_port': 1234, - 'cloud_address': 'test', - 'proxy_cloud_connections': True} - - @unittest.mock.patch('magma.common.scribe_client.LoggingServiceStub') - def test_log_entries_to_dest(self, logging_service_mock_stub): - """ - Test if the service starts and stops gracefully. 
- """ - # mock out Log.future - mock = unittest.mock.Mock() - mock.Log.future.side_effect = [unittest.mock.Mock()] - logging_service_mock_stub.side_effect = [mock] - data = {} - data['int'] = {"some_field": 456} - data['normal'] = {"imsi": "IMSI11111111", "ue_state": "IDLE"} - entries = [LogEntry( - category="test_category", - int_map={"some_field": 456}, - normal_map={"imsi": "IMSI11111111", "ue_state": "IDLE"}, - time=12345, - )] - self._scribe_client.log_to_scribe_with_sampling_rate(entries, 0) - mock.Log.future.assert_not_called() - - self._scribe_client.log_to_scribe_with_sampling_rate(entries, 1) - mock.Log.future.assert_called_once_with( - LogRequest( - Entries=entries, - Destination=LoggerDestination.Value("SCRIBE") - ) - ) - - -if __name__ == "__main__": - unittest.main() diff --git a/orc8r/gateway/python/magma/magmad/logging/__init__.py b/orc8r/gateway/python/magma/magmad/logging/__init__.py deleted file mode 100644 index 0dcd22e63c58..000000000000 --- a/orc8r/gateway/python/magma/magmad/logging/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -""" -Copyright (c) 2016-present, Facebook, Inc. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. An additional grant -of patent rights can be found in the PATENTS file in the same directory. -""" diff --git a/orc8r/gateway/python/magma/magmad/logging/systemd_tailer.py b/orc8r/gateway/python/magma/magmad/logging/systemd_tailer.py deleted file mode 100644 index eb194568f2a5..000000000000 --- a/orc8r/gateway/python/magma/magmad/logging/systemd_tailer.py +++ /dev/null @@ -1,73 +0,0 @@ -""" -Copyright (c) 2016-present, Facebook, Inc. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. An additional grant -of patent rights can be found in the PATENTS file in the same directory. 
-""" - -import asyncio -import logging -import snowflake -from orc8r.protos.logging_service_pb2 import LogEntry -from systemd import journal -from magma.common.scribe_client import ScribeClient - -SERVICE_EXIT_CATEGORY = "perfpipe_magma_gateway_service_exit" - - -class JournalEntryParser(object): - """ - Utility class for parsing journalctl entries into log events for scribe - """ - - @staticmethod - def entry_to_log_event(entry): - time = entry['_SOURCE_REALTIME_TIMESTAMP'].timestamp() - hw_id = "" if snowflake.snowflake() is None else snowflake.snowflake() - int_map = {'exit_status': entry['EXIT_STATUS']} - normal_map = {'unit': entry['UNIT'], - 'exit_code': entry["EXIT_CODE"]} - return LogEntry(category=SERVICE_EXIT_CATEGORY, - time=int(time), - hw_id=hw_id, - normal_map=normal_map, - int_map=int_map) - - -@asyncio.coroutine -def start_systemd_tailer(magmad_service_config): - """ - Tail systemd logs for exit statuses and codes, then report them to - scribe - - Args: - magmad_service (magma.common.service.MagmaService): - MagmaService instance for magmad - """ - loop = asyncio.get_event_loop() - scribe_client = ScribeClient(loop=loop) - poll_interval = magmad_service_config.get('systemd_tailer_poll_interval', - 10) - - reader = journal.Reader() - reader.log_level(journal.LOG_INFO) - # Only include entries since the current box has booted. 
- reader.this_boot() - reader.this_machine() - reader.add_match( - SYSLOG_IDENTIFIER=u'systemd', - CODE_FUNCTION=u'service_sigchld_event' - ) - # Move to the end of the journal - reader.seek_tail() - # Discard old journal entries - reader.get_previous() - while True: - if reader.wait() == journal.APPEND: - logging.debug("Found systemd exit error codes, reporting to scribe") - log_events = [JournalEntryParser.entry_to_log_event(e) - for e in reader] - scribe_client.log_to_scribe_with_sampling_rate(log_events) - yield from asyncio.sleep(poll_interval, loop=loop) diff --git a/orc8r/gateway/python/magma/magmad/logging/tests/__init__.py b/orc8r/gateway/python/magma/magmad/logging/tests/__init__.py deleted file mode 100644 index 0dcd22e63c58..000000000000 --- a/orc8r/gateway/python/magma/magmad/logging/tests/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -""" -Copyright (c) 2016-present, Facebook, Inc. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. An additional grant -of patent rights can be found in the PATENTS file in the same directory. -""" diff --git a/orc8r/gateway/python/magma/magmad/logging/tests/test_systemd_tailer.py b/orc8r/gateway/python/magma/magmad/logging/tests/test_systemd_tailer.py deleted file mode 100644 index c330c08088f7..000000000000 --- a/orc8r/gateway/python/magma/magmad/logging/tests/test_systemd_tailer.py +++ /dev/null @@ -1,74 +0,0 @@ -""" -Copyright (c) 2016-present, Facebook, Inc. -All rights reserved. - -This source code is licensed under the BSD-style license found in the -LICENSE file in the root directory of this source tree. An additional grant -of patent rights can be found in the PATENTS file in the same directory. 
-""" -import datetime -import unittest.mock -from unittest import TestCase -from uuid import UUID - -from orc8r.protos.logging_service_pb2 import LogEntry -from systemd import journal - -from magma.magmad.logging.systemd_tailer import JournalEntryParser - - -class SystemdTailerTest(TestCase): - """ - Tests for the systemd tailer - """ - MOCK_JOURNAL_ENTRY = { - 'EXIT_STATUS': 1, - '__MONOTONIC_TIMESTAMP': journal.Monotonic(( - datetime.timedelta(0, 1281, 913613), - UUID('7ed2f5e9-e027-4ab8-b0bb-5bd62966b421'))), - '_TRANSPORT': 'journal', - 'CODE_FILE': '../src/core/service.c', - '_COMM': 'systemd', - '_CAP_EFFECTIVE': '3fffffffff', - '_MACHINE_ID': UUID('7914936b-cb62-428b-b94c-6cb7fefd283a'), - '_BOOT_ID': UUID('7ed2f5e9-e027-4ab8-b0bb-5bd62966b421'), - 'EXIT_CODE': 'exited', - 'UNIT': 'magma@mobilityd.service', - '_GID': 0, - '_PID': 1, - '_SYSTEMD_CGROUP': '/init.scope', - '_CMDLINE': '/sbin/init', - 'SYSLOG_IDENTIFIER': 'systemd', - 'SYSLOG_FACILITY': 3, '_UID': 0, - 'CODE_FUNCTION': 'service_sigchld_event', - '_SYSTEMD_UNIT': 'init.scope', - 'MESSAGE': - 'magma@mobilityd.service:' - 'Main process exited, code=exited, status=1/FAILURE', - '_EXE': '/lib/systemd/systemd', - '_SOURCE_REALTIME_TIMESTAMP': datetime.datetime( - 2018, 1, 5, 17, 28, 17, 975170), - 'CODE_LINE': 2681, - '_HOSTNAME': 'magma-dev', - '_SYSTEMD_SLICE': '-.slice', - '__REALTIME_TIMESTAMP': datetime.datetime( - 2018, 1, 5, 17, 28, 17, 975200), - 'PRIORITY': 5} - GATEWAY_ID = "test ID" - MOCK_LOG_ENTRY = LogEntry( - category='perfpipe_magma_gateway_service_exit', - time=int(datetime.datetime( - 2018, 1, 5, 17, 28, 17, 975170).timestamp()), - hw_id=GATEWAY_ID, - int_map={'exit_status': 1}, - normal_map={'unit': 'magma@mobilityd.service', 'exit_code': 'exited'}) - - def test_journal_entry_parser(self): - """ - Test that mconfig updates are handled correctly - """ - with unittest.mock.patch('snowflake.snowflake') as mock_snowflake: - mock_snowflake.side_effect = lambda: self.GATEWAY_ID - 
parsed_entry = JournalEntryParser.entry_to_log_event( - self.MOCK_JOURNAL_ENTRY) - self.assertEqual(parsed_entry, self.MOCK_LOG_ENTRY) diff --git a/orc8r/gateway/python/magma/magmad/main.py b/orc8r/gateway/python/magma/magmad/main.py index 523d5bece430..247ff8692813 100644 --- a/orc8r/gateway/python/magma/magmad/main.py +++ b/orc8r/gateway/python/magma/magmad/main.py @@ -19,7 +19,6 @@ get_mconfig_manager from magma.magmad.generic_command.command_executor import \ get_command_executor_impl -from magma.magmad.logging.systemd_tailer import start_systemd_tailer from magma.magmad.upgrade.upgrader import UpgraderFactory, start_upgrade_loop from orc8r.protos.mconfig import mconfigs_pb2 from orc8r.protos.state_pb2_grpc import StateServiceStub @@ -181,9 +180,6 @@ async def bootstrap_success_cb(certs_generated: bool): if service.config.get('enable_network_monitor', False): service.loop.create_task(metrics_collection_loop(service.config)) - if service.config.get('enable_systemd_tailer', False): - service.loop.create_task(start_systemd_tailer(service.config)) - # Create generic command executor command_executor = None if service.config.get('generic_command_config', None): diff --git a/orc8r/gateway/python/setup.py b/orc8r/gateway/python/setup.py index 5cac40e8a75a..00dab367a06f 100644 --- a/orc8r/gateway/python/setup.py +++ b/orc8r/gateway/python/setup.py @@ -31,7 +31,6 @@ 'magma.magmad.check.kernel_check', 'magma.magmad.check.machine_check', 'magma.magmad.check.network_check', - 'magma.magmad.logging', 'magma.magmad.upgrade', 'magma.state', 'magma.eventd', diff --git a/orc8r/lib/go/definitions/env.go b/orc8r/lib/go/definitions/env.go new file mode 100644 index 000000000000..72a990d21d1e --- /dev/null +++ b/orc8r/lib/go/definitions/env.go @@ -0,0 +1,19 @@ +/* + Copyright (c) Facebook, Inc. and its affiliates. + All rights reserved. + + This source code is licensed under the BSD-style license found in the + LICENSE file in the root directory of this source tree. 
+*/ + +package definitions + +import "os" + +func GetEnvWithDefault(variable string, defaultValue string) string { + value := os.Getenv(variable) + if len(value) == 0 { + value = defaultValue + } + return value +} diff --git a/orc8r/lib/go/protos/directoryd.pb.go b/orc8r/lib/go/protos/directoryd.pb.go index 16a32c55606d..eed8bad23063 100644 --- a/orc8r/lib/go/protos/directoryd.pb.go +++ b/orc8r/lib/go/protos/directoryd.pb.go @@ -24,12 +24,256 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package -// -------------------------------------------------------------------------- -// Directory Service -// -// -// -// -------------------------------------------------------------------------- +type GetHostnameForHWIDRequest struct { + Hwid string `protobuf:"bytes,1,opt,name=hwid,proto3" json:"hwid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetHostnameForHWIDRequest) Reset() { *m = GetHostnameForHWIDRequest{} } +func (m *GetHostnameForHWIDRequest) String() string { return proto.CompactTextString(m) } +func (*GetHostnameForHWIDRequest) ProtoMessage() {} +func (*GetHostnameForHWIDRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f02336ef077163fd, []int{0} +} + +func (m *GetHostnameForHWIDRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetHostnameForHWIDRequest.Unmarshal(m, b) +} +func (m *GetHostnameForHWIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetHostnameForHWIDRequest.Marshal(b, m, deterministic) +} +func (m *GetHostnameForHWIDRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHostnameForHWIDRequest.Merge(m, src) +} +func (m *GetHostnameForHWIDRequest) XXX_Size() int { + return xxx_messageInfo_GetHostnameForHWIDRequest.Size(m) +} +func (m *GetHostnameForHWIDRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_GetHostnameForHWIDRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetHostnameForHWIDRequest proto.InternalMessageInfo + +func (m *GetHostnameForHWIDRequest) GetHwid() string { + if m != nil { + return m.Hwid + } + return "" +} + +type GetHostnameForHWIDResponse struct { + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetHostnameForHWIDResponse) Reset() { *m = GetHostnameForHWIDResponse{} } +func (m *GetHostnameForHWIDResponse) String() string { return proto.CompactTextString(m) } +func (*GetHostnameForHWIDResponse) ProtoMessage() {} +func (*GetHostnameForHWIDResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f02336ef077163fd, []int{1} +} + +func (m *GetHostnameForHWIDResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetHostnameForHWIDResponse.Unmarshal(m, b) +} +func (m *GetHostnameForHWIDResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetHostnameForHWIDResponse.Marshal(b, m, deterministic) +} +func (m *GetHostnameForHWIDResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHostnameForHWIDResponse.Merge(m, src) +} +func (m *GetHostnameForHWIDResponse) XXX_Size() int { + return xxx_messageInfo_GetHostnameForHWIDResponse.Size(m) +} +func (m *GetHostnameForHWIDResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetHostnameForHWIDResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetHostnameForHWIDResponse proto.InternalMessageInfo + +func (m *GetHostnameForHWIDResponse) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +type MapHWIDToHostnameRequest struct { + HwidToHostname map[string]string `protobuf:"bytes,1,rep,name=hwidToHostname,proto3" json:"hwidToHostname,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MapHWIDToHostnameRequest) Reset() { *m = MapHWIDToHostnameRequest{} } +func (m *MapHWIDToHostnameRequest) String() string { return proto.CompactTextString(m) } +func (*MapHWIDToHostnameRequest) ProtoMessage() {} +func (*MapHWIDToHostnameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f02336ef077163fd, []int{2} +} + +func (m *MapHWIDToHostnameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MapHWIDToHostnameRequest.Unmarshal(m, b) +} +func (m *MapHWIDToHostnameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MapHWIDToHostnameRequest.Marshal(b, m, deterministic) +} +func (m *MapHWIDToHostnameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MapHWIDToHostnameRequest.Merge(m, src) +} +func (m *MapHWIDToHostnameRequest) XXX_Size() int { + return xxx_messageInfo_MapHWIDToHostnameRequest.Size(m) +} +func (m *MapHWIDToHostnameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_MapHWIDToHostnameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MapHWIDToHostnameRequest proto.InternalMessageInfo + +func (m *MapHWIDToHostnameRequest) GetHwidToHostname() map[string]string { + if m != nil { + return m.HwidToHostname + } + return nil +} + +type GetIMSIForSessionIDRequest struct { + NetworkID string `protobuf:"bytes,1,opt,name=networkID,proto3" json:"networkID,omitempty"` + SessionID string `protobuf:"bytes,2,opt,name=sessionID,proto3" json:"sessionID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetIMSIForSessionIDRequest) Reset() { *m = GetIMSIForSessionIDRequest{} } +func (m *GetIMSIForSessionIDRequest) String() string { return proto.CompactTextString(m) } +func (*GetIMSIForSessionIDRequest) ProtoMessage() {} +func (*GetIMSIForSessionIDRequest) Descriptor() ([]byte, []int) { + 
return fileDescriptor_f02336ef077163fd, []int{3} +} + +func (m *GetIMSIForSessionIDRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetIMSIForSessionIDRequest.Unmarshal(m, b) +} +func (m *GetIMSIForSessionIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetIMSIForSessionIDRequest.Marshal(b, m, deterministic) +} +func (m *GetIMSIForSessionIDRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetIMSIForSessionIDRequest.Merge(m, src) +} +func (m *GetIMSIForSessionIDRequest) XXX_Size() int { + return xxx_messageInfo_GetIMSIForSessionIDRequest.Size(m) +} +func (m *GetIMSIForSessionIDRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetIMSIForSessionIDRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetIMSIForSessionIDRequest proto.InternalMessageInfo + +func (m *GetIMSIForSessionIDRequest) GetNetworkID() string { + if m != nil { + return m.NetworkID + } + return "" +} + +func (m *GetIMSIForSessionIDRequest) GetSessionID() string { + if m != nil { + return m.SessionID + } + return "" +} + +type GetIMSIForSessionIDResponse struct { + Imsi string `protobuf:"bytes,1,opt,name=imsi,proto3" json:"imsi,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetIMSIForSessionIDResponse) Reset() { *m = GetIMSIForSessionIDResponse{} } +func (m *GetIMSIForSessionIDResponse) String() string { return proto.CompactTextString(m) } +func (*GetIMSIForSessionIDResponse) ProtoMessage() {} +func (*GetIMSIForSessionIDResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f02336ef077163fd, []int{4} +} + +func (m *GetIMSIForSessionIDResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetIMSIForSessionIDResponse.Unmarshal(m, b) +} +func (m *GetIMSIForSessionIDResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetIMSIForSessionIDResponse.Marshal(b, m, deterministic) +} 
+func (m *GetIMSIForSessionIDResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetIMSIForSessionIDResponse.Merge(m, src) +} +func (m *GetIMSIForSessionIDResponse) XXX_Size() int { + return xxx_messageInfo_GetIMSIForSessionIDResponse.Size(m) +} +func (m *GetIMSIForSessionIDResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetIMSIForSessionIDResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetIMSIForSessionIDResponse proto.InternalMessageInfo + +func (m *GetIMSIForSessionIDResponse) GetImsi() string { + if m != nil { + return m.Imsi + } + return "" +} + +type MapSessionIDToIMSIRequest struct { + NetworkID string `protobuf:"bytes,1,opt,name=networkID,proto3" json:"networkID,omitempty"` + SessionIDToIMSI map[string]string `protobuf:"bytes,2,rep,name=sessionIDToIMSI,proto3" json:"sessionIDToIMSI,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MapSessionIDToIMSIRequest) Reset() { *m = MapSessionIDToIMSIRequest{} } +func (m *MapSessionIDToIMSIRequest) String() string { return proto.CompactTextString(m) } +func (*MapSessionIDToIMSIRequest) ProtoMessage() {} +func (*MapSessionIDToIMSIRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f02336ef077163fd, []int{5} +} + +func (m *MapSessionIDToIMSIRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MapSessionIDToIMSIRequest.Unmarshal(m, b) +} +func (m *MapSessionIDToIMSIRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MapSessionIDToIMSIRequest.Marshal(b, m, deterministic) +} +func (m *MapSessionIDToIMSIRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_MapSessionIDToIMSIRequest.Merge(m, src) +} +func (m *MapSessionIDToIMSIRequest) XXX_Size() int { + return xxx_messageInfo_MapSessionIDToIMSIRequest.Size(m) +} +func (m *MapSessionIDToIMSIRequest) 
XXX_DiscardUnknown() { + xxx_messageInfo_MapSessionIDToIMSIRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_MapSessionIDToIMSIRequest proto.InternalMessageInfo + +func (m *MapSessionIDToIMSIRequest) GetNetworkID() string { + if m != nil { + return m.NetworkID + } + return "" +} + +func (m *MapSessionIDToIMSIRequest) GetSessionIDToIMSI() map[string]string { + if m != nil { + return m.SessionIDToIMSI + } + return nil +} + type UpdateRecordRequest struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` @@ -43,7 +287,7 @@ func (m *UpdateRecordRequest) Reset() { *m = UpdateRecordRequest{} } func (m *UpdateRecordRequest) String() string { return proto.CompactTextString(m) } func (*UpdateRecordRequest) ProtoMessage() {} func (*UpdateRecordRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f02336ef077163fd, []int{0} + return fileDescriptor_f02336ef077163fd, []int{6} } func (m *UpdateRecordRequest) XXX_Unmarshal(b []byte) error { @@ -97,7 +341,7 @@ func (m *DirectoryField) Reset() { *m = DirectoryField{} } func (m *DirectoryField) String() string { return proto.CompactTextString(m) } func (*DirectoryField) ProtoMessage() {} func (*DirectoryField) Descriptor() ([]byte, []int) { - return fileDescriptor_f02336ef077163fd, []int{1} + return fileDescriptor_f02336ef077163fd, []int{7} } func (m *DirectoryField) XXX_Unmarshal(b []byte) error { @@ -143,7 +387,7 @@ func (m *DeleteRecordRequest) Reset() { *m = DeleteRecordRequest{} } func (m *DeleteRecordRequest) String() string { return proto.CompactTextString(m) } func (*DeleteRecordRequest) ProtoMessage() {} func (*DeleteRecordRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f02336ef077163fd, []int{2} + return fileDescriptor_f02336ef077163fd, []int{8} } func (m *DeleteRecordRequest) XXX_Unmarshal(b []byte) error { @@ -183,7 +427,7 @@ func (m *GetDirectoryFieldRequest) Reset() { *m = 
GetDirectoryFieldReque func (m *GetDirectoryFieldRequest) String() string { return proto.CompactTextString(m) } func (*GetDirectoryFieldRequest) ProtoMessage() {} func (*GetDirectoryFieldRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f02336ef077163fd, []int{3} + return fileDescriptor_f02336ef077163fd, []int{9} } func (m *GetDirectoryFieldRequest) XXX_Unmarshal(b []byte) error { @@ -231,7 +475,7 @@ func (m *DirectoryRecord) Reset() { *m = DirectoryRecord{} } func (m *DirectoryRecord) String() string { return proto.CompactTextString(m) } func (*DirectoryRecord) ProtoMessage() {} func (*DirectoryRecord) Descriptor() ([]byte, []int) { - return fileDescriptor_f02336ef077163fd, []int{4} + return fileDescriptor_f02336ef077163fd, []int{10} } func (m *DirectoryRecord) XXX_Unmarshal(b []byte) error { @@ -284,7 +528,7 @@ func (m *AllDirectoryRecords) Reset() { *m = AllDirectoryRecords{} } func (m *AllDirectoryRecords) String() string { return proto.CompactTextString(m) } func (*AllDirectoryRecords) ProtoMessage() {} func (*AllDirectoryRecords) Descriptor() ([]byte, []int) { - return fileDescriptor_f02336ef077163fd, []int{5} + return fileDescriptor_f02336ef077163fd, []int{11} } func (m *AllDirectoryRecords) XXX_Unmarshal(b []byte) error { @@ -313,6 +557,14 @@ func (m *AllDirectoryRecords) GetRecords() []*DirectoryRecord { } func init() { + proto.RegisterType((*GetHostnameForHWIDRequest)(nil), "magma.orc8r.GetHostnameForHWIDRequest") + proto.RegisterType((*GetHostnameForHWIDResponse)(nil), "magma.orc8r.GetHostnameForHWIDResponse") + proto.RegisterType((*MapHWIDToHostnameRequest)(nil), "magma.orc8r.MapHWIDToHostnameRequest") + proto.RegisterMapType((map[string]string)(nil), "magma.orc8r.MapHWIDToHostnameRequest.HwidToHostnameEntry") + proto.RegisterType((*GetIMSIForSessionIDRequest)(nil), "magma.orc8r.GetIMSIForSessionIDRequest") + proto.RegisterType((*GetIMSIForSessionIDResponse)(nil), "magma.orc8r.GetIMSIForSessionIDResponse") + 
proto.RegisterType((*MapSessionIDToIMSIRequest)(nil), "magma.orc8r.MapSessionIDToIMSIRequest") + proto.RegisterMapType((map[string]string)(nil), "magma.orc8r.MapSessionIDToIMSIRequest.SessionIDToIMSIEntry") proto.RegisterType((*UpdateRecordRequest)(nil), "magma.orc8r.UpdateRecordRequest") proto.RegisterMapType((map[string]string)(nil), "magma.orc8r.UpdateRecordRequest.FieldsEntry") proto.RegisterType((*DirectoryField)(nil), "magma.orc8r.DirectoryField") @@ -326,34 +578,51 @@ func init() { func init() { proto.RegisterFile("orc8r/protos/directoryd.proto", fileDescriptor_f02336ef077163fd) } var fileDescriptor_f02336ef077163fd = []byte{ - // 432 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcf, 0x8b, 0xd3, 0x40, - 0x18, 0x6d, 0x12, 0x5c, 0xed, 0x57, 0xd9, 0x1f, 0x53, 0xd1, 0x6c, 0xaa, 0x10, 0x06, 0x16, 0x22, - 0x48, 0x02, 0x2b, 0x48, 0xf5, 0xa4, 0xd2, 0x35, 0x82, 0x88, 0x10, 0x51, 0xd0, 0xcb, 0x92, 0xcd, - 0x7c, 0xae, 0x83, 0x49, 0x67, 0x9d, 0xcc, 0x56, 0xf2, 0xef, 0x79, 0xf3, 0xec, 0x3f, 0x24, 0x99, - 0x24, 0x25, 0x49, 0x53, 0xeb, 0xc1, 0x53, 0x66, 0x1e, 0x6f, 0xde, 0x7b, 0xf3, 0xf2, 0x31, 0xf0, - 0x40, 0xc8, 0x64, 0x2e, 0x83, 0x2b, 0x29, 0x94, 0xc8, 0x03, 0xc6, 0x25, 0x26, 0x4a, 0xc8, 0x82, - 0xf9, 0x1a, 0x21, 0x93, 0x2c, 0xbe, 0xcc, 0x62, 0x5f, 0x93, 0x9c, 0xe3, 0x0e, 0x37, 0x11, 0x59, - 0x26, 0x96, 0x15, 0x8f, 0xfe, 0x34, 0x60, 0xfa, 0xe1, 0x8a, 0xc5, 0x0a, 0x23, 0x4c, 0x84, 0x64, - 0x11, 0x7e, 0xbf, 0xc6, 0x5c, 0x91, 0x7d, 0x30, 0x39, 0xb3, 0x0d, 0xd7, 0xf0, 0xc6, 0x91, 0xc9, - 0x19, 0x71, 0xe0, 0x56, 0x2a, 0x92, 0x58, 0x71, 0xb1, 0xb4, 0x4d, 0x8d, 0xae, 0xf7, 0x64, 0x01, - 0x7b, 0x5f, 0x38, 0xa6, 0x2c, 0xb7, 0x2d, 0xd7, 0xf2, 0x26, 0xa7, 0x8f, 0xfc, 0x96, 0xb9, 0x3f, - 0xa0, 0xee, 0xbf, 0xd2, 0xf4, 0xb3, 0xa5, 0x92, 0x45, 0x54, 0x9f, 0x75, 0x9e, 0xc2, 0xa4, 0x05, - 0x93, 0x43, 0xb0, 0xbe, 0x61, 0x51, 0x27, 0x28, 0x97, 0xe4, 0x0e, 0xdc, 0x58, 0xc5, 0xe9, 0x35, - 0xd6, 0xfe, 0xd5, 0xe6, 0x99, 
0x39, 0x37, 0xe8, 0x1c, 0xf6, 0x17, 0x4d, 0x01, 0x5a, 0xe3, 0x5f, - 0x4f, 0xd3, 0x13, 0x98, 0x2e, 0x30, 0xc5, 0x1d, 0xb7, 0xa7, 0x21, 0xd8, 0x21, 0xaa, 0xae, 0xc7, - 0xb6, 0xa6, 0x66, 0x30, 0xd6, 0x37, 0x3a, 0x2f, 0x03, 0xd4, 0x55, 0x69, 0xe0, 0x0d, 0x16, 0xf4, - 0x97, 0x01, 0x07, 0x6b, 0x99, 0xca, 0x73, 0x43, 0xe0, 0x21, 0x1c, 0x36, 0xd5, 0x9e, 0x7f, 0xe5, - 0x79, 0xc9, 0xb4, 0x4d, 0xd7, 0xf2, 0xc6, 0xd1, 0x41, 0x83, 0xbf, 0xae, 0x60, 0xf2, 0xbc, 0xd7, - 0xbc, 0xd7, 0x69, 0xbe, 0x67, 0xf4, 0xbf, 0x5b, 0x7f, 0x0b, 0xd3, 0x17, 0x69, 0xda, 0x33, 0xc9, - 0xc9, 0x13, 0xb8, 0x29, 0xab, 0xa5, 0x6d, 0xe8, 0x50, 0xf7, 0xff, 0x16, 0x2a, 0x6a, 0xc8, 0xa7, - 0xbf, 0x4d, 0xb8, 0x17, 0xc6, 0x0a, 0x7f, 0xc4, 0xc5, 0x9a, 0xf3, 0x1e, 0xe5, 0x8a, 0x27, 0x48, - 0xce, 0xe0, 0x76, 0x7b, 0x8c, 0x88, 0xbb, 0x6b, 0xc2, 0x9c, 0xa3, 0x0e, 0xe3, 0xa3, 0xe0, 0x8c, - 0x8e, 0x4a, 0x99, 0xf6, 0xdf, 0xee, 0xc9, 0x0c, 0x0c, 0xc2, 0xb0, 0xcc, 0x27, 0x38, 0xda, 0x98, - 0x06, 0x72, 0xd2, 0x61, 0x6e, 0x9b, 0x16, 0x67, 0x36, 0x5c, 0x86, 0xe6, 0xd0, 0x11, 0x79, 0x07, - 0x77, 0x43, 0x54, 0x43, 0xb5, 0x6e, 0x26, 0x71, 0xba, 0xf1, 0x07, 0x0e, 0xd1, 0xd1, 0xcb, 0xd9, - 0xe7, 0x63, 0x4d, 0x0a, 0xaa, 0x27, 0x20, 0xe5, 0x17, 0xc1, 0xa5, 0xa8, 0x5f, 0x82, 0x8b, 0x3d, - 0xfd, 0x7d, 0xfc, 0x27, 0x00, 0x00, 0xff, 0xff, 0xf9, 0xc6, 0x90, 0xb8, 0x4c, 0x04, 0x00, 0x00, + // 703 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xdd, 0x6e, 0xd3, 0x30, + 0x14, 0x6e, 0xda, 0x31, 0xe8, 0xd9, 0xb4, 0x1f, 0x77, 0x82, 0x34, 0x1d, 0xd2, 0x64, 0x69, 0x50, + 0x24, 0x94, 0x8a, 0x21, 0xa1, 0x0e, 0x6e, 0xd8, 0xd4, 0xad, 0xad, 0xa0, 0x42, 0xb4, 0xe3, 0xf7, + 0x66, 0xca, 0x1a, 0xb3, 0x85, 0xa5, 0x71, 0xb1, 0xbd, 0x4d, 0x7d, 0x09, 0x9e, 0x09, 0x71, 0xc7, + 0x35, 0xaf, 0xc0, 0x83, 0xa0, 0x38, 0x4e, 0xda, 0xa4, 0x2e, 0xdd, 0x24, 0xae, 0xea, 0x1c, 0x7f, + 0xe7, 0xf3, 0x39, 0x9f, 0x7d, 0x3e, 0x15, 0xee, 0x53, 0xd6, 0xaf, 0xb3, 0xda, 0x90, 0x51, 0x41, + 0x79, 0xcd, 0xf5, 
0x18, 0xe9, 0x0b, 0xca, 0x46, 0xae, 0x2d, 0x23, 0x68, 0x69, 0xe0, 0x9c, 0x0e, + 0x1c, 0x5b, 0x82, 0xac, 0x72, 0x0a, 0xdb, 0xa7, 0x83, 0x01, 0x0d, 0x22, 0x1c, 0xae, 0x41, 0xb9, + 0x49, 0x44, 0x8b, 0x72, 0x11, 0x38, 0x03, 0x72, 0x48, 0x59, 0xeb, 0x43, 0xbb, 0xd1, 0x25, 0xdf, + 0x2e, 0x08, 0x17, 0x08, 0xc1, 0xc2, 0xd9, 0x95, 0xe7, 0x9a, 0xc6, 0x96, 0x51, 0x2d, 0x76, 0xe5, + 0x1a, 0xd7, 0xc1, 0xd2, 0x25, 0xf0, 0x21, 0x0d, 0x38, 0x41, 0x16, 0xdc, 0x39, 0x53, 0x5b, 0x2a, + 0x2b, 0xf9, 0xc6, 0x3f, 0x0c, 0x30, 0x3b, 0xce, 0x30, 0xc4, 0x1f, 0xd1, 0x98, 0x20, 0x3e, 0xca, + 0x81, 0x95, 0x90, 0x7e, 0xbc, 0x61, 0x1a, 0x5b, 0x85, 0xea, 0xd2, 0xce, 0xae, 0x3d, 0xd1, 0x88, + 0x3d, 0x2b, 0xdd, 0x6e, 0xa5, 0x72, 0x0f, 0x02, 0xc1, 0x46, 0xdd, 0x0c, 0xa1, 0xb5, 0x07, 0x25, + 0x0d, 0x0c, 0xad, 0x41, 0xe1, 0x9c, 0x8c, 0x54, 0xb5, 0xe1, 0x12, 0x6d, 0xc0, 0xad, 0x4b, 0xc7, + 0xbf, 0x20, 0x66, 0x5e, 0xc6, 0xa2, 0x8f, 0xe7, 0xf9, 0xba, 0x81, 0x3f, 0xca, 0xe6, 0xdb, 0x9d, + 0x5e, 0xfb, 0x90, 0xb2, 0x1e, 0xe1, 0xdc, 0xa3, 0xc1, 0x58, 0xae, 0x4d, 0x28, 0x06, 0x44, 0x5c, + 0x51, 0x76, 0xde, 0x6e, 0x28, 0xbe, 0x71, 0x20, 0xdc, 0xe5, 0x71, 0x86, 0x62, 0x1e, 0x07, 0xf0, + 0x13, 0xa8, 0x68, 0x99, 0x95, 0xae, 0x08, 0x16, 0xbc, 0x01, 0xf7, 0xe2, 0x9b, 0x08, 0xd7, 0xf8, + 0x8f, 0x01, 0xe5, 0x8e, 0x33, 0x4c, 0xc0, 0x47, 0x34, 0x4c, 0xbf, 0x5e, 0x31, 0x04, 0x56, 0x79, + 0x3a, 0xcf, 0xcc, 0x4b, 0xbd, 0x5f, 0x64, 0xf5, 0xd6, 0xd3, 0xdb, 0x99, 0x70, 0xa4, 0x78, 0x96, + 0xd3, 0xda, 0x87, 0x0d, 0x1d, 0xf0, 0x46, 0x9a, 0xff, 0x34, 0xa0, 0xf4, 0x6e, 0xe8, 0x3a, 0x82, + 0x74, 0x49, 0x9f, 0x32, 0x37, 0x6e, 0x70, 0x05, 0xf2, 0xc9, 0xd3, 0xcc, 0x7b, 0x6e, 0xf8, 0xf4, + 0x7c, 0xda, 0x77, 0x84, 0x47, 0x03, 0x45, 0x92, 0x7c, 0xa3, 0x06, 0x2c, 0x7e, 0xf1, 0x88, 0xef, + 0x72, 0xb3, 0x20, 0xbb, 0x7c, 0x9c, 0xea, 0x52, 0xc3, 0x6e, 0x1f, 0x4a, 0x78, 0xd4, 0x96, 0xca, + 0xb5, 0x76, 0x61, 0x69, 0x22, 0x7c, 0xa3, 0x26, 0xea, 0xb0, 0xd2, 0x88, 0x47, 0x54, 0x72, 0x5c, + 0x37, 0x1b, 0x6f, 0x43, 0xa9, 0x41, 0x7c, 0x32, 0xa7, 
0x7b, 0xdc, 0x04, 0xb3, 0x49, 0x44, 0xfa, + 0x8c, 0x59, 0x4a, 0x55, 0xa0, 0x28, 0x3b, 0x3a, 0x0e, 0x0b, 0x50, 0x52, 0xc9, 0xc0, 0x2b, 0x32, + 0xc2, 0xbf, 0x0c, 0x58, 0x4d, 0x68, 0xa2, 0x33, 0xa7, 0x08, 0x1e, 0xc1, 0x5a, 0x2c, 0xed, 0xf1, + 0x99, 0xc7, 0x43, 0xa4, 0x7c, 0x3e, 0xc5, 0xee, 0x6a, 0x1c, 0x6f, 0x45, 0x61, 0xf4, 0x32, 0xa3, + 0x7c, 0x35, 0xa5, 0x7c, 0xe6, 0xa0, 0xff, 0xad, 0x7a, 0x07, 0x4a, 0x7b, 0xbe, 0x9f, 0x39, 0x84, + 0xa3, 0x67, 0x70, 0x9b, 0x45, 0x4b, 0x65, 0x32, 0x9b, 0xff, 0x2a, 0xaa, 0x1b, 0x83, 0x77, 0xbe, + 0x17, 0x26, 0xa4, 0x79, 0x4d, 0xe9, 0xf9, 0xc5, 0x10, 0x9d, 0x02, 0x9a, 0xb6, 0x43, 0xf4, 0x20, + 0x45, 0x38, 0xd3, 0x60, 0xad, 0x87, 0x73, 0x71, 0xd1, 0xfc, 0xe3, 0x1c, 0x7a, 0x0b, 0x25, 0xe5, + 0x7e, 0x7c, 0xec, 0x60, 0x1c, 0x6d, 0x5f, 0xcb, 0x1f, 0xad, 0xf5, 0x14, 0xec, 0x3d, 0xf5, 0x5c, + 0x9c, 0x43, 0x5f, 0xa1, 0xa4, 0xf1, 0x1c, 0x34, 0x55, 0xd4, 0x0c, 0xbf, 0xb3, 0xaa, 0xf3, 0x81, + 0x49, 0xf9, 0x3d, 0xd8, 0x98, 0x34, 0x13, 0x1e, 0xb9, 0x01, 0xcf, 0x28, 0x35, 0xd3, 0x6f, 0xb4, + 0x0d, 0xec, 0xfc, 0xce, 0xc3, 0xbd, 0xa6, 0x23, 0xc8, 0x95, 0x33, 0x4a, 0xee, 0xa5, 0x47, 0xd8, + 0xa5, 0xd7, 0x27, 0xe8, 0x00, 0x96, 0x27, 0xe7, 0x1a, 0x6d, 0xcd, 0x1b, 0x79, 0xbd, 0x46, 0x07, + 0xb0, 0x3c, 0x39, 0x7e, 0x19, 0x1a, 0xcd, 0x64, 0xea, 0x69, 0x3e, 0xc1, 0xfa, 0xd4, 0x78, 0x66, + 0xee, 0x6e, 0xd6, 0xf8, 0x5a, 0x15, 0xfd, 0xeb, 0x94, 0x18, 0x9c, 0x43, 0x6f, 0xe0, 0x6e, 0x93, + 0x08, 0xdd, 0x3b, 0x9f, 0xae, 0xc4, 0x4a, 0x97, 0xaf, 0x49, 0xc2, 0xb9, 0xfd, 0xca, 0xe7, 0xb2, + 0x04, 0xd5, 0xa2, 0x7f, 0x0d, 0xbe, 0x77, 0x52, 0x3b, 0xa5, 0xea, 0xcf, 0xc3, 0xc9, 0xa2, 0xfc, + 0x7d, 0xfa, 0x37, 0x00, 0x00, 0xff, 0xff, 0xae, 0x2c, 0x21, 0x8c, 0x7f, 0x08, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -364,6 +633,194 @@ var _ grpc.ClientConnInterface // is compatible with the grpc package it is being compiled against. 
const _ = grpc.SupportPackageIsVersion6 +// DirectoryLookupClient is the client API for DirectoryLookup service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type DirectoryLookupClient interface { + // GetHostnameForHWID returns the hostname mapped to by hardware ID. + GetHostnameForHWID(ctx context.Context, in *GetHostnameForHWIDRequest, opts ...grpc.CallOption) (*GetHostnameForHWIDResponse, error) + // MapHWIDsToHostnames maps {hwid -> hostname}. + MapHWIDsToHostnames(ctx context.Context, in *MapHWIDToHostnameRequest, opts ...grpc.CallOption) (*Void, error) + // GetIMSIForSessionID returns the IMSI mapped to by session ID. + GetIMSIForSessionID(ctx context.Context, in *GetIMSIForSessionIDRequest, opts ...grpc.CallOption) (*GetIMSIForSessionIDResponse, error) + // MapSessionIDsToIMSIs maps {session ID -> IMSI}. + MapSessionIDsToIMSIs(ctx context.Context, in *MapSessionIDToIMSIRequest, opts ...grpc.CallOption) (*Void, error) +} + +type directoryLookupClient struct { + cc grpc.ClientConnInterface +} + +func NewDirectoryLookupClient(cc grpc.ClientConnInterface) DirectoryLookupClient { + return &directoryLookupClient{cc} +} + +func (c *directoryLookupClient) GetHostnameForHWID(ctx context.Context, in *GetHostnameForHWIDRequest, opts ...grpc.CallOption) (*GetHostnameForHWIDResponse, error) { + out := new(GetHostnameForHWIDResponse) + err := c.cc.Invoke(ctx, "/magma.orc8r.DirectoryLookup/GetHostnameForHWID", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *directoryLookupClient) MapHWIDsToHostnames(ctx context.Context, in *MapHWIDToHostnameRequest, opts ...grpc.CallOption) (*Void, error) { + out := new(Void) + err := c.cc.Invoke(ctx, "/magma.orc8r.DirectoryLookup/MapHWIDsToHostnames", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *directoryLookupClient) GetIMSIForSessionID(ctx context.Context, in *GetIMSIForSessionIDRequest, opts ...grpc.CallOption) (*GetIMSIForSessionIDResponse, error) { + out := new(GetIMSIForSessionIDResponse) + err := c.cc.Invoke(ctx, "/magma.orc8r.DirectoryLookup/GetIMSIForSessionID", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *directoryLookupClient) MapSessionIDsToIMSIs(ctx context.Context, in *MapSessionIDToIMSIRequest, opts ...grpc.CallOption) (*Void, error) { + out := new(Void) + err := c.cc.Invoke(ctx, "/magma.orc8r.DirectoryLookup/MapSessionIDsToIMSIs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DirectoryLookupServer is the server API for DirectoryLookup service. +type DirectoryLookupServer interface { + // GetHostnameForHWID returns the hostname mapped to by hardware ID. + GetHostnameForHWID(context.Context, *GetHostnameForHWIDRequest) (*GetHostnameForHWIDResponse, error) + // MapHWIDsToHostnames maps {hwid -> hostname}. + MapHWIDsToHostnames(context.Context, *MapHWIDToHostnameRequest) (*Void, error) + // GetIMSIForSessionID returns the IMSI mapped to by session ID. + GetIMSIForSessionID(context.Context, *GetIMSIForSessionIDRequest) (*GetIMSIForSessionIDResponse, error) + // MapSessionIDsToIMSIs maps {session ID -> IMSI}. + MapSessionIDsToIMSIs(context.Context, *MapSessionIDToIMSIRequest) (*Void, error) +} + +// UnimplementedDirectoryLookupServer can be embedded to have forward compatible implementations. 
+type UnimplementedDirectoryLookupServer struct { +} + +func (*UnimplementedDirectoryLookupServer) GetHostnameForHWID(ctx context.Context, req *GetHostnameForHWIDRequest) (*GetHostnameForHWIDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetHostnameForHWID not implemented") +} +func (*UnimplementedDirectoryLookupServer) MapHWIDsToHostnames(ctx context.Context, req *MapHWIDToHostnameRequest) (*Void, error) { + return nil, status.Errorf(codes.Unimplemented, "method MapHWIDsToHostnames not implemented") +} +func (*UnimplementedDirectoryLookupServer) GetIMSIForSessionID(ctx context.Context, req *GetIMSIForSessionIDRequest) (*GetIMSIForSessionIDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetIMSIForSessionID not implemented") +} +func (*UnimplementedDirectoryLookupServer) MapSessionIDsToIMSIs(ctx context.Context, req *MapSessionIDToIMSIRequest) (*Void, error) { + return nil, status.Errorf(codes.Unimplemented, "method MapSessionIDsToIMSIs not implemented") +} + +func RegisterDirectoryLookupServer(s *grpc.Server, srv DirectoryLookupServer) { + s.RegisterService(&_DirectoryLookup_serviceDesc, srv) +} + +func _DirectoryLookup_GetHostnameForHWID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetHostnameForHWIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DirectoryLookupServer).GetHostnameForHWID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/magma.orc8r.DirectoryLookup/GetHostnameForHWID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DirectoryLookupServer).GetHostnameForHWID(ctx, req.(*GetHostnameForHWIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DirectoryLookup_MapHWIDsToHostnames_Handler(srv interface{}, ctx context.Context, dec func(interface{}) 
error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MapHWIDToHostnameRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DirectoryLookupServer).MapHWIDsToHostnames(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/magma.orc8r.DirectoryLookup/MapHWIDsToHostnames", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DirectoryLookupServer).MapHWIDsToHostnames(ctx, req.(*MapHWIDToHostnameRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DirectoryLookup_GetIMSIForSessionID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetIMSIForSessionIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DirectoryLookupServer).GetIMSIForSessionID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/magma.orc8r.DirectoryLookup/GetIMSIForSessionID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DirectoryLookupServer).GetIMSIForSessionID(ctx, req.(*GetIMSIForSessionIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DirectoryLookup_MapSessionIDsToIMSIs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MapSessionIDToIMSIRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DirectoryLookupServer).MapSessionIDsToIMSIs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/magma.orc8r.DirectoryLookup/MapSessionIDsToIMSIs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DirectoryLookupServer).MapSessionIDsToIMSIs(ctx, req.(*MapSessionIDToIMSIRequest)) 
+ } + return interceptor(ctx, in, info, handler) +} + +var _DirectoryLookup_serviceDesc = grpc.ServiceDesc{ + ServiceName: "magma.orc8r.DirectoryLookup", + HandlerType: (*DirectoryLookupServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetHostnameForHWID", + Handler: _DirectoryLookup_GetHostnameForHWID_Handler, + }, + { + MethodName: "MapHWIDsToHostnames", + Handler: _DirectoryLookup_MapHWIDsToHostnames_Handler, + }, + { + MethodName: "GetIMSIForSessionID", + Handler: _DirectoryLookup_GetIMSIForSessionID_Handler, + }, + { + MethodName: "MapSessionIDsToIMSIs", + Handler: _DirectoryLookup_MapSessionIDsToIMSIs_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "orc8r/protos/directoryd.proto", +} + // GatewayDirectoryServiceClient is the client API for GatewayDirectoryService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. diff --git a/orc8r/lib/go/protos/directoryd_helper.go b/orc8r/lib/go/protos/directoryd_helper.go new file mode 100644 index 000000000000..2618bfcd19c3 --- /dev/null +++ b/orc8r/lib/go/protos/directoryd_helper.go @@ -0,0 +1,56 @@ +/* + Copyright (c) Facebook, Inc. and its affiliates. + All rights reserved. + + This source code is licensed under the BSD-style license found in the + LICENSE file in the root directory of this source tree. 
+*/ + +package protos + +import ( + "errors" +) + +func (m *GetHostnameForHWIDRequest) Validate() error { + if m == nil { + return errors.New("request cannot be nil") + } + if m.Hwid == "" { + return errors.New("request params cannot be empty") + } + return nil +} + +func (m *MapHWIDToHostnameRequest) Validate() error { + if m == nil { + return errors.New("request cannot be nil") + } + if m.HwidToHostname == nil { + return errors.New("request params cannot be empty") + } + return nil +} + +func (m *GetIMSIForSessionIDRequest) Validate() error { + if m == nil { + return errors.New("request cannot be nil") + } + if m.SessionID == "" { + return errors.New("request params cannot be empty") + } + return nil +} + +func (m *MapSessionIDToIMSIRequest) Validate() error { + if m == nil { + return errors.New("request cannot be nil") + } + if m.NetworkID == "" { + return errors.New("network ID cannot be empty") + } + if m.SessionIDToIMSI == nil { + return errors.New("request params cannot be empty") + } + return nil +} diff --git a/orc8r/lib/go/protos/go.mod b/orc8r/lib/go/protos/go.mod index c0909171f03f..0f40c9af7f74 100644 --- a/orc8r/lib/go/protos/go.mod +++ b/orc8r/lib/go/protos/go.mod @@ -7,6 +7,9 @@ require ( github.com/golang/protobuf v1.3.3 github.com/prometheus/client_model v0.2.0 github.com/stretchr/testify v1.4.0 + golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 // indirect golang.org/x/net v0.0.0-20200202094626-16171245cfb2 + golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 // indirect google.golang.org/grpc v1.27.1 + honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc // indirect ) diff --git a/orc8r/lib/go/protos/logging_service.pb.go b/orc8r/lib/go/protos/logging_service.pb.go deleted file mode 100644 index ceaf494c5c51..000000000000 --- a/orc8r/lib/go/protos/logging_service.pb.go +++ /dev/null @@ -1,311 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: orc8r/protos/logging_service.proto - -package protos - -import ( - context "context" - fmt "fmt" - proto "github.com/golang/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// Where to log to. Currently only supports scribe. -type LoggerDestination int32 - -const ( - LoggerDestination_SCRIBE LoggerDestination = 0 -) - -var LoggerDestination_name = map[int32]string{ - 0: "SCRIBE", -} - -var LoggerDestination_value = map[string]int32{ - "SCRIBE": 0, -} - -func (x LoggerDestination) String() string { - return proto.EnumName(LoggerDestination_name, int32(x)) -} - -func (LoggerDestination) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e52e10d1e5f65add, []int{0} -} - -type LogEntry struct { - // category of the log entry - Category string `protobuf:"bytes,1,opt,name=category,proto3" json:"category,omitempty"` - // required unix timestamp in seconds of the entry - Time int64 `protobuf:"varint,3,opt,name=time,proto3" json:"time,omitempty"` - // optional hardware id of the gateway if the LogEntry comes from a gateway - HwId string `protobuf:"bytes,4,opt,name=hw_id,json=hwId,proto3" json:"hw_id,omitempty"` - // optinoal map of normal(string) values - NormalMap map[string]string `protobuf:"bytes,5,rep,name=normal_map,json=normalMap,proto3" json:"normal_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // optional map of int values - IntMap 
map[string]int64 `protobuf:"bytes,6,rep,name=int_map,json=intMap,proto3" json:"int_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - // optional a set of string values, usually used for *gk_on* or *quick_experiment* - TagSet []string `protobuf:"bytes,7,rep,name=tag_set,json=tagSet,proto3" json:"tag_set,omitempty"` - // optional a vector of strings, usually used for stack traces - Normvector []string `protobuf:"bytes,8,rep,name=normvector,proto3" json:"normvector,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogEntry) Reset() { *m = LogEntry{} } -func (m *LogEntry) String() string { return proto.CompactTextString(m) } -func (*LogEntry) ProtoMessage() {} -func (*LogEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_e52e10d1e5f65add, []int{0} -} - -func (m *LogEntry) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogEntry.Unmarshal(m, b) -} -func (m *LogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogEntry.Marshal(b, m, deterministic) -} -func (m *LogEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogEntry.Merge(m, src) -} -func (m *LogEntry) XXX_Size() int { - return xxx_messageInfo_LogEntry.Size(m) -} -func (m *LogEntry) XXX_DiscardUnknown() { - xxx_messageInfo_LogEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_LogEntry proto.InternalMessageInfo - -func (m *LogEntry) GetCategory() string { - if m != nil { - return m.Category - } - return "" -} - -func (m *LogEntry) GetTime() int64 { - if m != nil { - return m.Time - } - return 0 -} - -func (m *LogEntry) GetHwId() string { - if m != nil { - return m.HwId - } - return "" -} - -func (m *LogEntry) GetNormalMap() map[string]string { - if m != nil { - return m.NormalMap - } - return nil -} - -func (m *LogEntry) GetIntMap() map[string]int64 { - if m != nil { - return m.IntMap - } - return nil -} - 
-func (m *LogEntry) GetTagSet() []string { - if m != nil { - return m.TagSet - } - return nil -} - -func (m *LogEntry) GetNormvector() []string { - if m != nil { - return m.Normvector - } - return nil -} - -type LogRequest struct { - Entries []*LogEntry `protobuf:"bytes,1,rep,name=Entries,proto3" json:"Entries,omitempty"` - Destination LoggerDestination `protobuf:"varint,2,opt,name=Destination,proto3,enum=magma.orc8r.LoggerDestination" json:"Destination,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogRequest) Reset() { *m = LogRequest{} } -func (m *LogRequest) String() string { return proto.CompactTextString(m) } -func (*LogRequest) ProtoMessage() {} -func (*LogRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_e52e10d1e5f65add, []int{1} -} - -func (m *LogRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogRequest.Unmarshal(m, b) -} -func (m *LogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogRequest.Marshal(b, m, deterministic) -} -func (m *LogRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogRequest.Merge(m, src) -} -func (m *LogRequest) XXX_Size() int { - return xxx_messageInfo_LogRequest.Size(m) -} -func (m *LogRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LogRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LogRequest proto.InternalMessageInfo - -func (m *LogRequest) GetEntries() []*LogEntry { - if m != nil { - return m.Entries - } - return nil -} - -func (m *LogRequest) GetDestination() LoggerDestination { - if m != nil { - return m.Destination - } - return LoggerDestination_SCRIBE -} - -func init() { - proto.RegisterEnum("magma.orc8r.LoggerDestination", LoggerDestination_name, LoggerDestination_value) - proto.RegisterType((*LogEntry)(nil), "magma.orc8r.LogEntry") - proto.RegisterMapType((map[string]int64)(nil), "magma.orc8r.LogEntry.IntMapEntry") - 
proto.RegisterMapType((map[string]string)(nil), "magma.orc8r.LogEntry.NormalMapEntry") - proto.RegisterType((*LogRequest)(nil), "magma.orc8r.LogRequest") -} - -func init() { proto.RegisterFile("orc8r/protos/logging_service.proto", fileDescriptor_e52e10d1e5f65add) } - -var fileDescriptor_e52e10d1e5f65add = []byte{ - // 413 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x52, 0x41, 0x6b, 0xdb, 0x30, - 0x14, 0xae, 0xab, 0xc4, 0x49, 0x5e, 0x20, 0xb4, 0x6f, 0x1b, 0x55, 0x3d, 0xe8, 0xb2, 0xb0, 0x83, - 0xd9, 0xc1, 0x86, 0xf4, 0xd2, 0x95, 0x1d, 0x46, 0xbb, 0x1c, 0x02, 0xde, 0x0e, 0x0a, 0xec, 0xb0, - 0x4b, 0x50, 0x1d, 0xa1, 0x8a, 0xd9, 0x52, 0x26, 0xab, 0x29, 0x3d, 0x0d, 0xf6, 0xcb, 0x47, 0xe4, - 0xba, 0x38, 0x5d, 0xa0, 0x27, 0xe9, 0x7b, 0xfa, 0xbe, 0x4f, 0xef, 0x3d, 0x3e, 0x98, 0x18, 0x9b, - 0x5f, 0xd8, 0x74, 0x6d, 0x8d, 0x33, 0x55, 0x5a, 0x18, 0x29, 0x95, 0x96, 0xcb, 0x4a, 0xd8, 0x8d, - 0xca, 0x45, 0xe2, 0xcb, 0x38, 0x2c, 0xb9, 0x2c, 0x79, 0xe2, 0x99, 0xd1, 0xe9, 0x8e, 0x20, 0x37, - 0x65, 0x69, 0x74, 0xcd, 0x9b, 0xfc, 0x25, 0xd0, 0xcf, 0x8c, 0x9c, 0x69, 0x67, 0x1f, 0x30, 0x82, - 0x7e, 0xce, 0x9d, 0x90, 0xc6, 0x3e, 0xd0, 0x60, 0x1c, 0xc4, 0x03, 0xf6, 0x84, 0x11, 0xa1, 0xe3, - 0x54, 0x29, 0x28, 0x19, 0x07, 0x31, 0x61, 0xfe, 0x8e, 0xaf, 0xa0, 0x7b, 0x7b, 0xbf, 0x54, 0x2b, - 0xda, 0xf1, 0xe4, 0xce, 0xed, 0xfd, 0x7c, 0x85, 0xd7, 0x00, 0xda, 0xd8, 0x92, 0x17, 0xcb, 0x92, - 0xaf, 0x69, 0x77, 0x4c, 0xe2, 0xe1, 0xf4, 0x43, 0xd2, 0x6a, 0x27, 0x69, 0xfe, 0x4b, 0xbe, 0x7b, - 0xde, 0x37, 0xbe, 0xf6, 0x90, 0x0d, 0x74, 0x83, 0xf1, 0x12, 0x7a, 0x4a, 0x3b, 0xef, 0x10, 0x7a, - 0x87, 0xf7, 0xfb, 0x1d, 0xe6, 0xda, 0x3d, 0xc9, 0x43, 0xe5, 0x01, 0x9e, 0x40, 0xcf, 0xf1, 0xed, - 0x3e, 0x1c, 0xed, 0x8d, 0x49, 0x3c, 0x60, 0xa1, 0xe3, 0x72, 0x21, 0x1c, 0x9e, 0xd5, 0x9d, 0x6d, - 0x44, 0xee, 0x8c, 0xa5, 0x7d, 0xff, 0xd6, 0xaa, 0x44, 0x9f, 0x61, 0xb4, 0xdb, 0x11, 0x1e, 0x01, - 0xf9, 0x25, 0x9a, 0x5d, 0x6c, 0xaf, 0xf8, 0x1a, 0xba, 0x1b, 0x5e, 
0xdc, 0x09, 0x7a, 0xe8, 0x6b, - 0x35, 0xb8, 0x3c, 0xbc, 0x08, 0xa2, 0x4f, 0x30, 0x6c, 0x75, 0xf3, 0x92, 0x94, 0xb4, 0xa4, 0x93, - 0x3f, 0x00, 0x99, 0x91, 0x4c, 0xfc, 0xbe, 0x13, 0x95, 0xc3, 0x14, 0x7a, 0x5b, 0x0b, 0x25, 0x2a, - 0x1a, 0xf8, 0xd9, 0xdf, 0xec, 0x9d, 0x9d, 0x35, 0x2c, 0xfc, 0x02, 0xc3, 0xaf, 0xa2, 0x72, 0x4a, - 0x73, 0xa7, 0x8c, 0xf6, 0xf6, 0xa3, 0xe9, 0xd9, 0x73, 0x91, 0x14, 0xb6, 0xc5, 0x62, 0x6d, 0xc9, - 0xc7, 0x77, 0x70, 0xfc, 0x1f, 0x03, 0x01, 0xc2, 0xc5, 0x35, 0x9b, 0x5f, 0xcd, 0x8e, 0x0e, 0xa6, - 0x33, 0x18, 0x65, 0x75, 0xce, 0x16, 0x75, 0xcc, 0xf0, 0x1c, 0x48, 0x66, 0x24, 0x9e, 0x3c, 0xff, - 0xe6, 0x71, 0x8a, 0xe8, 0x78, 0xe7, 0xe1, 0x87, 0x51, 0xab, 0xc9, 0xc1, 0xd5, 0xdb, 0x9f, 0xa7, - 0xbe, 0x9a, 0xd6, 0x81, 0x2c, 0xd4, 0x4d, 0x2a, 0xcd, 0x63, 0x2e, 0x6f, 0x42, 0x7f, 0x9e, 0xff, - 0x0b, 0x00, 0x00, 0xff, 0xff, 0x9e, 0xcb, 0x1e, 0xfe, 0xdf, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// LoggingServiceClient is the client API for LoggingService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type LoggingServiceClient interface { - // Log a list of LogEntry. - Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*Void, error) -} - -type loggingServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewLoggingServiceClient(cc grpc.ClientConnInterface) LoggingServiceClient { - return &loggingServiceClient{cc} -} - -func (c *loggingServiceClient) Log(ctx context.Context, in *LogRequest, opts ...grpc.CallOption) (*Void, error) { - out := new(Void) - err := c.cc.Invoke(ctx, "/magma.orc8r.LoggingService/Log", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// LoggingServiceServer is the server API for LoggingService service. -type LoggingServiceServer interface { - // Log a list of LogEntry. - Log(context.Context, *LogRequest) (*Void, error) -} - -// UnimplementedLoggingServiceServer can be embedded to have forward compatible implementations. -type UnimplementedLoggingServiceServer struct { -} - -func (*UnimplementedLoggingServiceServer) Log(ctx context.Context, req *LogRequest) (*Void, error) { - return nil, status.Errorf(codes.Unimplemented, "method Log not implemented") -} - -func RegisterLoggingServiceServer(s *grpc.Server, srv LoggingServiceServer) { - s.RegisterService(&_LoggingService_serviceDesc, srv) -} - -func _LoggingService_Log_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(LogRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LoggingServiceServer).Log(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/magma.orc8r.LoggingService/Log", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LoggingServiceServer).Log(ctx, req.(*LogRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _LoggingService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "magma.orc8r.LoggingService", - HandlerType: (*LoggingServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Log", - Handler: _LoggingService_Log_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "orc8r/protos/logging_service.proto", -} diff --git a/orc8r/lib/go/protos/service_desc.go b/orc8r/lib/go/protos/service_desc.go index 11643fcfd1c9..48f8fa63fe7a 100644 --- a/orc8r/lib/go/protos/service_desc.go +++ b/orc8r/lib/go/protos/service_desc.go @@ -30,16 +30,6 @@ func GetLegacyDispatcherDesc() *grpc.ServiceDesc { } } -func 
GetLegacyLoggerDesc() *grpc.ServiceDesc { - return &grpc.ServiceDesc{ - ServiceName: "magma.LoggingService", - HandlerType: _LoggingService_serviceDesc.HandlerType, - Methods: _LoggingService_serviceDesc.Methods, - Streams: _LoggingService_serviceDesc.Streams, - Metadata: _LoggingService_serviceDesc.Metadata, - } -} - func GetLegacyMetricsdDesc() *grpc.ServiceDesc { return &grpc.ServiceDesc{ ServiceName: "magma.MetricsController", diff --git a/orc8r/lib/go/protos/sync_rpc_service.pb.go b/orc8r/lib/go/protos/sync_rpc_service.pb.go index 21cd22a7e7f0..05bef845dfdb 100644 --- a/orc8r/lib/go/protos/sync_rpc_service.pb.go +++ b/orc8r/lib/go/protos/sync_rpc_service.pb.go @@ -303,84 +303,6 @@ func (m *SyncRPCResponse) GetHeartBeat() bool { return false } -type HardwareID struct { - Hwid string `protobuf:"bytes,1,opt,name=hwid,proto3" json:"hwid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HardwareID) Reset() { *m = HardwareID{} } -func (m *HardwareID) String() string { return proto.CompactTextString(m) } -func (*HardwareID) ProtoMessage() {} -func (*HardwareID) Descriptor() ([]byte, []int) { - return fileDescriptor_22887391e8a5ac6c, []int{4} -} - -func (m *HardwareID) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HardwareID.Unmarshal(m, b) -} -func (m *HardwareID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HardwareID.Marshal(b, m, deterministic) -} -func (m *HardwareID) XXX_Merge(src proto.Message) { - xxx_messageInfo_HardwareID.Merge(m, src) -} -func (m *HardwareID) XXX_Size() int { - return xxx_messageInfo_HardwareID.Size(m) -} -func (m *HardwareID) XXX_DiscardUnknown() { - xxx_messageInfo_HardwareID.DiscardUnknown(m) -} - -var xxx_messageInfo_HardwareID proto.InternalMessageInfo - -func (m *HardwareID) GetHwid() string { - if m != nil { - return m.Hwid - } - return "" -} - -type Hostname struct { - Name string 
`protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Hostname) Reset() { *m = Hostname{} } -func (m *Hostname) String() string { return proto.CompactTextString(m) } -func (*Hostname) ProtoMessage() {} -func (*Hostname) Descriptor() ([]byte, []int) { - return fileDescriptor_22887391e8a5ac6c, []int{5} -} - -func (m *Hostname) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Hostname.Unmarshal(m, b) -} -func (m *Hostname) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Hostname.Marshal(b, m, deterministic) -} -func (m *Hostname) XXX_Merge(src proto.Message) { - xxx_messageInfo_Hostname.Merge(m, src) -} -func (m *Hostname) XXX_Size() int { - return xxx_messageInfo_Hostname.Size(m) -} -func (m *Hostname) XXX_DiscardUnknown() { - xxx_messageInfo_Hostname.DiscardUnknown(m) -} - -var xxx_messageInfo_Hostname proto.InternalMessageInfo - -func (m *Hostname) GetName() string { - if m != nil { - return m.Name - } - return "" -} - func init() { proto.RegisterType((*GatewayRequest)(nil), "magma.orc8r.GatewayRequest") proto.RegisterMapType((map[string]string)(nil), "magma.orc8r.GatewayRequest.HeadersEntry") @@ -388,8 +310,6 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "magma.orc8r.GatewayResponse.HeadersEntry") proto.RegisterType((*SyncRPCRequest)(nil), "magma.orc8r.SyncRPCRequest") proto.RegisterType((*SyncRPCResponse)(nil), "magma.orc8r.SyncRPCResponse") - proto.RegisterType((*HardwareID)(nil), "magma.orc8r.HardwareID") - proto.RegisterType((*Hostname)(nil), "magma.orc8r.Hostname") } func init() { @@ -397,41 +317,37 @@ func init() { } var fileDescriptor_22887391e8a5ac6c = []byte{ - // 533 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xda, 0x40, - 0x10, 0x8e, 0x43, 0xf8, 0xc9, 0x90, 0x92, 0x6a, 0x95, 
0xa6, 0x2e, 0x44, 0x11, 0x72, 0xa5, 0xca, - 0xbd, 0x40, 0x45, 0x55, 0x09, 0xe5, 0x56, 0x68, 0x12, 0xd2, 0x53, 0xb5, 0x39, 0xb5, 0x97, 0x68, - 0xb1, 0x47, 0xd8, 0x0a, 0x78, 0xcd, 0xec, 0x02, 0xf2, 0xa9, 0x0f, 0xd2, 0x97, 0xec, 0x13, 0xb4, - 0x95, 0xff, 0x00, 0x47, 0x88, 0x1e, 0x72, 0xda, 0x99, 0xf9, 0x66, 0x76, 0xbf, 0x6f, 0x66, 0x77, - 0xe1, 0xad, 0x24, 0xa7, 0x4f, 0xdd, 0x90, 0xa4, 0x96, 0xaa, 0xab, 0xa2, 0xc0, 0x79, 0xa0, 0xd0, - 0x79, 0x50, 0x48, 0x4b, 0xdf, 0xc1, 0x4e, 0x12, 0x67, 0xf5, 0x99, 0x98, 0xcc, 0x44, 0x27, 0x49, - 0xb5, 0x7e, 0x1b, 0xd0, 0xb8, 0x15, 0x1a, 0x57, 0x22, 0xe2, 0x38, 0x5f, 0xa0, 0xd2, 0x8c, 0xc1, - 0xd1, 0x64, 0x75, 0xe7, 0x9a, 0x46, 0xdb, 0xb0, 0x8f, 0x79, 0x62, 0xb3, 0x0b, 0x38, 0x16, 0x0b, - 0xed, 0x49, 0xf2, 0x75, 0x64, 0x1e, 0x26, 0xc0, 0x26, 0x10, 0x57, 0x84, 0x42, 0x7b, 0x66, 0x29, - 0xad, 0x88, 0x6d, 0x36, 0x80, 0xaa, 0x87, 0xc2, 0x45, 0x52, 0x66, 0xb9, 0x5d, 0xb2, 0xeb, 0x3d, - 0xbb, 0xb3, 0x75, 0x6e, 0xa7, 0x78, 0x66, 0x67, 0x94, 0xa6, 0x5e, 0x07, 0x9a, 0x22, 0x9e, 0x17, - 0x32, 0x13, 0xaa, 0xa1, 0x88, 0xa6, 0x52, 0xb8, 0x66, 0xa5, 0x6d, 0xd8, 0x27, 0x3c, 0x77, 0x9b, - 0x57, 0x70, 0xb2, 0x5d, 0xc2, 0x5e, 0x42, 0xe9, 0x11, 0xa3, 0x8c, 0x72, 0x6c, 0xb2, 0x33, 0x28, - 0x2f, 0xc5, 0x74, 0x81, 0x19, 0xdb, 0xd4, 0xb9, 0x3a, 0xec, 0x1b, 0xd6, 0x1f, 0x03, 0x4e, 0xd7, - 0xc7, 0xab, 0x50, 0x06, 0x0a, 0xd9, 0x39, 0x54, 0x94, 0x16, 0x7a, 0xa1, 0xb2, 0x2d, 0x32, 0x8f, - 0x0d, 0x37, 0x2a, 0x0e, 0x13, 0x15, 0xef, 0x77, 0xab, 0x48, 0xb7, 0xf9, 0xbf, 0x8c, 0x52, 0x41, - 0x46, 0x4c, 0x1b, 0x89, 0xcc, 0xa3, 0x94, 0x36, 0x12, 0xb1, 0x77, 0xd0, 0x78, 0x44, 0x0c, 0x87, - 0x32, 0x08, 0x3e, 0x3b, 0xda, 0x5f, 0xa2, 0x59, 0x6e, 0x1b, 0x76, 0x8d, 0x3f, 0x89, 0x3e, 0xab, - 0x01, 0xbf, 0x0c, 0x68, 0xdc, 0x47, 0x81, 0xc3, 0xbf, 0x0d, 0xf3, 0x99, 0x9f, 0x41, 0x99, 0x70, - 0x9e, 0x0d, 0xfd, 0x05, 0x4f, 0x1d, 0xf6, 0x09, 0xaa, 0x84, 0xf3, 0x81, 0x74, 0xd3, 0x99, 0xd7, - 0x7b, 0xad, 0x3d, 0x33, 0xe4, 0x79, 0x6e, 0x7c, 0x59, 0x3c, 0x14, 0xa4, 0x07, 0x28, 0x74, 
0xa2, - 0xb8, 0xc6, 0x37, 0x01, 0x76, 0x09, 0xe0, 0xc8, 0x20, 0x18, 0x4e, 0xa5, 0x42, 0x37, 0x91, 0x5e, - 0xe3, 0x5b, 0x11, 0xeb, 0x27, 0x9c, 0xae, 0xc9, 0x65, 0xd3, 0xd9, 0xcd, 0xae, 0x0f, 0x35, 0x42, - 0x15, 0x6e, 0xd1, 0xbb, 0xd8, 0x37, 0x1c, 0xbe, 0xce, 0xde, 0x4f, 0xd0, 0x6a, 0x03, 0x8c, 0x04, - 0xb9, 0x2b, 0x41, 0x78, 0xf7, 0x25, 0xbe, 0xdb, 0xde, 0xca, 0x5f, 0xbf, 0x86, 0xd8, 0xb6, 0x2e, - 0xa1, 0x36, 0x92, 0x4a, 0x07, 0x62, 0x86, 0x31, 0x1e, 0xaf, 0x39, 0x1e, 0xdb, 0xbd, 0xbf, 0x9b, - 0x06, 0xdf, 0xa7, 0x4f, 0x8f, 0x7d, 0x87, 0xf3, 0x6b, 0xa5, 0xc5, 0x78, 0xea, 0x2b, 0x2f, 0x87, - 0x34, 0xa1, 0x98, 0xb1, 0x22, 0xe9, 0x27, 0xd2, 0x9b, 0xad, 0xdd, 0x68, 0xd2, 0x71, 0xeb, 0xc0, - 0x36, 0x3e, 0x18, 0xec, 0x2b, 0x54, 0xb3, 0xf8, 0xf3, 0xf7, 0xba, 0x01, 0x76, 0x8b, 0x3a, 0x17, - 0x77, 0x23, 0x69, 0xb4, 0xf2, 0x5d, 0xf6, 0xba, 0x50, 0xb8, 0x69, 0x4e, 0xf3, 0x55, 0x11, 0xc8, - 0xca, 0xac, 0x83, 0x41, 0xeb, 0xc7, 0x9b, 0x04, 0xe9, 0xa6, 0x1f, 0xd2, 0xd4, 0x1f, 0x77, 0x27, - 0x32, 0xfb, 0x97, 0xc6, 0x95, 0x64, 0xfd, 0xf8, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x10, 0xa2, - 0x87, 0xae, 0x04, 0x00, 0x00, + // 475 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcb, 0x6e, 0xda, 0x40, + 0x14, 0xed, 0x40, 0x78, 0xe4, 0x92, 0x92, 0x6a, 0x14, 0x45, 0x6e, 0x88, 0x2a, 0x44, 0xa5, 0xca, + 0xdd, 0x98, 0x8a, 0xaa, 0x12, 0xca, 0xae, 0xa0, 0xa8, 0x8f, 0x55, 0x35, 0x59, 0xb5, 0x9b, 0x68, + 0xb0, 0xaf, 0xb0, 0x15, 0xe3, 0x31, 0x33, 0x03, 0x91, 0x57, 0xfd, 0x90, 0x7e, 0x48, 0x7f, 0xab, + 0x5f, 0x50, 0x55, 0x9e, 0xb1, 0xc1, 0x44, 0x88, 0x2e, 0x58, 0x31, 0xf7, 0xcc, 0x7d, 0x9c, 0x73, + 0x0f, 0x1e, 0x78, 0x2d, 0xa4, 0x3f, 0x96, 0xc3, 0x54, 0x0a, 0x2d, 0xd4, 0x50, 0x65, 0x89, 0x7f, + 0x2f, 0x53, 0xff, 0x5e, 0xa1, 0x5c, 0x47, 0x3e, 0x7a, 0x06, 0xa7, 0x9d, 0x05, 0x9f, 0x2f, 0xb8, + 0x67, 0x52, 0x07, 0x7f, 0x08, 0x74, 0x3f, 0x71, 0x8d, 0x8f, 0x3c, 0x63, 0xb8, 0x5c, 0xa1, 0xd2, + 0x94, 0xc2, 0xc9, 0xfc, 0xf1, 0x4b, 0xe0, 0x90, 
0x3e, 0x71, 0x4f, 0x99, 0x39, 0xd3, 0x6b, 0x38, + 0xe5, 0x2b, 0x1d, 0x0a, 0x19, 0xe9, 0xcc, 0xa9, 0x99, 0x8b, 0x2d, 0x90, 0x57, 0xa4, 0x5c, 0x87, + 0x4e, 0xdd, 0x56, 0xe4, 0x67, 0x3a, 0x81, 0x56, 0x88, 0x3c, 0x40, 0xa9, 0x9c, 0x46, 0xbf, 0xee, + 0x76, 0x46, 0xae, 0x57, 0x99, 0xeb, 0xed, 0xce, 0xf4, 0x3e, 0xdb, 0xd4, 0xdb, 0x44, 0xcb, 0x8c, + 0x95, 0x85, 0xd4, 0x81, 0x56, 0xca, 0xb3, 0x58, 0xf0, 0xc0, 0x69, 0xf6, 0x89, 0x7b, 0xc6, 0xca, + 0xf0, 0xea, 0x06, 0xce, 0xaa, 0x25, 0xf4, 0x05, 0xd4, 0x1f, 0x30, 0x2b, 0x28, 0xe7, 0x47, 0x7a, + 0x01, 0x8d, 0x35, 0x8f, 0x57, 0x58, 0xb0, 0xb5, 0xc1, 0x4d, 0x6d, 0x4c, 0x06, 0x7f, 0x09, 0x9c, + 0x6f, 0xc6, 0xab, 0x54, 0x24, 0x0a, 0xe9, 0x25, 0x34, 0x95, 0xe6, 0x7a, 0xa5, 0x8a, 0x16, 0x45, + 0x44, 0xa7, 0x5b, 0x15, 0x35, 0xa3, 0xe2, 0xed, 0x7e, 0x15, 0xb6, 0xcd, 0xff, 0x65, 0xd4, 0x77, + 0x64, 0xe4, 0xb4, 0x51, 0x4a, 0xe7, 0xc4, 0xd2, 0x46, 0x29, 0xe9, 0x1b, 0xe8, 0x3e, 0x20, 0xa6, + 0x53, 0x91, 0x24, 0x1f, 0x7d, 0x1d, 0xad, 0xd1, 0x69, 0xf4, 0x89, 0xdb, 0x66, 0x4f, 0xd0, 0xa3, + 0x16, 0xf0, 0x8b, 0x40, 0xf7, 0x2e, 0x4b, 0x7c, 0xf6, 0x6d, 0x5a, 0x7a, 0x7e, 0x01, 0x0d, 0x89, + 0xcb, 0xc2, 0xf4, 0xe7, 0xcc, 0x06, 0xf4, 0x03, 0xb4, 0x24, 0x2e, 0x27, 0x22, 0xb0, 0x9e, 0x77, + 0x46, 0xbd, 0x03, 0x1e, 0xb2, 0x32, 0x37, 0xff, 0xb3, 0x84, 0xc8, 0xa5, 0x9e, 0x20, 0xd7, 0x46, + 0x71, 0x9b, 0x6d, 0x01, 0xfa, 0x0a, 0xc0, 0x17, 0x49, 0x32, 0x8d, 0x85, 0xc2, 0xc0, 0x48, 0x6f, + 0xb3, 0x0a, 0x32, 0xf8, 0x09, 0xe7, 0x1b, 0x72, 0x85, 0x3b, 0xfb, 0xd9, 0x8d, 0xa1, 0x2d, 0x51, + 0xa5, 0x15, 0x7a, 0xd7, 0x87, 0xcc, 0x61, 0x9b, 0xec, 0xc3, 0x04, 0x47, 0xbf, 0xb7, 0xeb, 0xb9, + 0xb3, 0x1f, 0x0e, 0xfd, 0x0e, 0x97, 0xb7, 0x4a, 0xf3, 0x59, 0x1c, 0xa9, 0xb0, 0xbc, 0xd2, 0x12, + 0xf9, 0x82, 0xee, 0x8e, 0x7c, 0x42, 0xfc, 0xaa, 0xb7, 0xff, 0xd6, 0xec, 0x6b, 0xf0, 0xcc, 0x25, + 0xef, 0x08, 0xfd, 0x0a, 0xad, 0x02, 0x3f, 0xba, 0xd7, 0xa4, 0xf7, 0xe3, 0xa5, 0xc9, 0x19, 0xda, + 0x67, 0x20, 0x8e, 0x66, 0xc3, 0xb9, 0x28, 0x5e, 0x83, 0x59, 0xd3, 0xfc, 0xbe, 0xff, 
0x17, 0x00, + 0x00, 0xff, 0xff, 0x16, 0x73, 0xb2, 0x1e, 0x24, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -452,8 +368,6 @@ type SyncRPCServiceClient interface { EstablishSyncRPCStream(ctx context.Context, opts ...grpc.CallOption) (SyncRPCService_EstablishSyncRPCStreamClient, error) // same method as EstablishSyncRPCStream, but named differently for backward compatibility SyncRPC(ctx context.Context, opts ...grpc.CallOption) (SyncRPCService_SyncRPCClient, error) - // returns the hostname mapped to the current hwid - GetHostnameForHwid(ctx context.Context, in *HardwareID, opts ...grpc.CallOption) (*Hostname, error) } type syncRPCServiceClient struct { @@ -526,15 +440,6 @@ func (x *syncRPCServiceSyncRPCClient) Recv() (*SyncRPCRequest, error) { return m, nil } -func (c *syncRPCServiceClient) GetHostnameForHwid(ctx context.Context, in *HardwareID, opts ...grpc.CallOption) (*Hostname, error) { - out := new(Hostname) - err := c.cc.Invoke(ctx, "/magma.orc8r.SyncRPCService/GetHostnameForHwid", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - // SyncRPCServiceServer is the server API for SyncRPCService service. type SyncRPCServiceServer interface { // creates a bidirectional stream from gateway to cloud @@ -543,8 +448,6 @@ type SyncRPCServiceServer interface { EstablishSyncRPCStream(SyncRPCService_EstablishSyncRPCStreamServer) error // same method as EstablishSyncRPCStream, but named differently for backward compatibility SyncRPC(SyncRPCService_SyncRPCServer) error - // returns the hostname mapped to the current hwid - GetHostnameForHwid(context.Context, *HardwareID) (*Hostname, error) } // UnimplementedSyncRPCServiceServer can be embedded to have forward compatible implementations. 
@@ -557,9 +460,6 @@ func (*UnimplementedSyncRPCServiceServer) EstablishSyncRPCStream(srv SyncRPCServ func (*UnimplementedSyncRPCServiceServer) SyncRPC(srv SyncRPCService_SyncRPCServer) error { return status.Errorf(codes.Unimplemented, "method SyncRPC not implemented") } -func (*UnimplementedSyncRPCServiceServer) GetHostnameForHwid(ctx context.Context, req *HardwareID) (*Hostname, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetHostnameForHwid not implemented") -} func RegisterSyncRPCServiceServer(s *grpc.Server, srv SyncRPCServiceServer) { s.RegisterService(&_SyncRPCService_serviceDesc, srv) @@ -617,33 +517,10 @@ func (x *syncRPCServiceSyncRPCServer) Recv() (*SyncRPCResponse, error) { return m, nil } -func _SyncRPCService_GetHostnameForHwid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HardwareID) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SyncRPCServiceServer).GetHostnameForHwid(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/magma.orc8r.SyncRPCService/GetHostnameForHwid", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SyncRPCServiceServer).GetHostnameForHwid(ctx, req.(*HardwareID)) - } - return interceptor(ctx, in, info, handler) -} - var _SyncRPCService_serviceDesc = grpc.ServiceDesc{ ServiceName: "magma.orc8r.SyncRPCService", HandlerType: (*SyncRPCServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetHostnameForHwid", - Handler: _SyncRPCService_GetHostnameForHwid_Handler, - }, - }, + Methods: []grpc.MethodDesc{}, Streams: []grpc.StreamDesc{ { StreamName: "EstablishSyncRPCStream", diff --git a/orc8r/protos/directoryd.proto b/orc8r/protos/directoryd.proto index c42c6a32598f..7c10f11b3286 100644 --- a/orc8r/protos/directoryd.proto +++ b/orc8r/protos/directoryd.proto @@ -13,11 +13,55 @@ 
package magma.orc8r; option go_package = "magma/orc8r/lib/go/protos"; // -------------------------------------------------------------------------- -// Directory Service -// -// -// +// Directory service -- controller // -------------------------------------------------------------------------- + +message GetHostnameForHWIDRequest { + string hwid = 1; +} + +message GetHostnameForHWIDResponse { + string hostname = 1; +} + +message MapHWIDToHostnameRequest { + map hwidToHostname = 1; +} + +message GetIMSIForSessionIDRequest { + string networkID = 1; + string sessionID = 2; +} + +message GetIMSIForSessionIDResponse { + string imsi = 1; +} + +message MapSessionIDToIMSIRequest { + string networkID = 1; + map sessionIDToIMSI = 2; +} + +// DirectoryLookup service associates various identities and locations. +// This service runs in the controller, generating and consuming mostly derived state. +service DirectoryLookup { + // GetHostnameForHWID returns the hostname mapped to by hardware ID. + rpc GetHostnameForHWID(GetHostnameForHWIDRequest) returns (GetHostnameForHWIDResponse) {}; + + // MapHWIDsToHostnames maps {hwid -> hostname}. + rpc MapHWIDsToHostnames(MapHWIDToHostnameRequest) returns (Void) {}; + + // GetIMSIForSessionID returns the IMSI mapped to by session ID. + rpc GetIMSIForSessionID(GetIMSIForSessionIDRequest) returns (GetIMSIForSessionIDResponse) {}; + + // MapSessionIDsToIMSIs maps {session ID -> IMSI}. 
+ rpc MapSessionIDsToIMSIs(MapSessionIDToIMSIRequest) returns (Void) {}; +} + +// -------------------------------------------------------------------------- +// Directory service -- gateway +// -------------------------------------------------------------------------- + message UpdateRecordRequest { string id = 1; string location = 2; @@ -64,4 +108,4 @@ service GatewayDirectoryService { // Get all directory records rpc GetAllDirectoryRecords (Void) returns (AllDirectoryRecords) {}; -} \ No newline at end of file +} diff --git a/orc8r/protos/logging_service.proto b/orc8r/protos/logging_service.proto deleted file mode 100644 index aa66ba427b55..000000000000 --- a/orc8r/protos/logging_service.proto +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) 2016-present, Facebook, Inc. -// All rights reserved. -// -// This source code is licensed under the BSD-style license found in the -// LICENSE file in the root directory of this source tree. An additional grant -// of patent rights can be found in the PATENTS file in the same directory. -syntax = "proto3"; - -import "orc8r/protos/common.proto"; - -package magma.orc8r; -option go_package = "magma/orc8r/lib/go/protos"; - -message LogEntry { - // category of the log entry - string category = 1; - // required unix timestamp in seconds of the entry - int64 time = 3; - // optional hardware id of the gateway if the LogEntry comes from a gateway - string hw_id = 4; - // optinoal map of normal(string) values - map normal_map = 5; - // optional map of int values - map int_map = 6; - // optional a set of string values, usually used for *gk_on* or *quick_experiment* - repeated string tag_set = 7; - // optional a vector of strings, usually used for stack traces - repeated string normvector = 8; -} - -message LogRequest { - repeated LogEntry Entries = 1; - LoggerDestination Destination = 2; -} - -service LoggingService { - // Log a list of LogEntry. - rpc Log (LogRequest) returns (Void) {} -} - -// Where to log to. 
Currently only supports scribe. -enum LoggerDestination { - SCRIBE = 0; -} \ No newline at end of file diff --git a/orc8r/protos/sync_rpc_service.proto b/orc8r/protos/sync_rpc_service.proto index bb0ec6c1133d..557765ffc17a 100644 --- a/orc8r/protos/sync_rpc_service.proto +++ b/orc8r/protos/sync_rpc_service.proto @@ -58,14 +58,6 @@ message SyncRPCResponse { bool heartBeat = 3; } -message HardwareID { - string hwid = 1; -} - -message Hostname { - string name = 1; -} - service SyncRPCService { // creates a bidirectional stream from gateway to cloud // so cloud can send in SyncRPCRequest, and wait for SyncRPCResponse. @@ -74,7 +66,4 @@ service SyncRPCService { // same method as EstablishSyncRPCStream, but named differently for backward compatibility rpc SyncRPC (stream SyncRPCResponse) returns (stream SyncRPCRequest) {} - - // returns the hostname mapped to the current hwid - rpc GetHostnameForHwid(HardwareID) returns (Hostname) {} } diff --git a/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/GrafanaAPI.js b/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/GrafanaAPI.js index f6aa91346f47..441394b88b0a 100644 --- a/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/GrafanaAPI.js +++ b/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/GrafanaAPI.js @@ -12,11 +12,9 @@ import axios from 'axios'; import type { AddOrgUserResponse, - CreateDashboardResponse, CreateDatasourceResponse, CreateOrgResponse, CreateUserResponse, - Dashboard, DeleteOrgResponse, GetDatasourcesResponse, GetHealthResponse, @@ -59,11 +57,6 @@ export type GrafanaClient = { ) => GrafanaPromise, getDatasources: (orgID: number) => GrafanaPromise, - createDashboard: ( - db: Dashboard, - orgID: number, - ) => GrafanaPromise, - getHealth: () => GrafanaPromise, }; @@ -159,11 +152,7 @@ const client = ( url: apiURL + `/api/datasources`, method: 'POST', data: ds, - headers: { - ...constHeaders, - 'X-Grafana-Org-Id': orgId.toString(), - 'Content-Type': 'application/json', 
- }, + headers: {...constHeaders, 'X-Grafana-Org-Id': orgId.toString()}, }); }, @@ -188,18 +177,6 @@ const client = ( }); }, - async createDashboard( - db: Dashboard, - orgID: number, - ): GrafanaPromise { - return request({ - url: apiURL + `/api/dashboards/db/`, - method: 'POST', - data: db, - headers: {...constHeaders, 'X-Grafana-Org-Id': orgID.toString()}, - }); - }, - async getHealth(): GrafanaPromise { return request({ url: apiURL + `/api/health`, diff --git a/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/GrafanaAPIType.js b/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/GrafanaAPIType.js index fa7c3784aeae..60f743a74d47 100644 --- a/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/GrafanaAPIType.js +++ b/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/GrafanaAPIType.js @@ -71,20 +71,6 @@ export type CreateUserResponse = { message: string, }; -export type Dashboard = { - dashboard: mixed, - folderId: number, - overwrite: boolean, -}; - -export type CreateDashboardResponse = { - id: number, - uid: string, - url: string, - status: string, - version: number, -}; - export type PostDatasource = { orgId: number, name: string, diff --git a/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/dashboards/Dashboards.js b/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/dashboards/Dashboards.js deleted file mode 100644 index ba070755a3be..000000000000 --- a/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/dashboards/Dashboards.js +++ /dev/null @@ -1,430 +0,0 @@ -/** - * Copyright 2004-present Facebook. All Rights Reserved. - * - * This source code is licensed under the BSD-style license found in the - * LICENSE file in the root directory of this source tree. 
- * - * @flow - * @format - */ - -import * as Grafana from 'grafana-dash-gen'; - -const netIDVar = 'networkID'; -const gwIDVar = 'gatewayID'; - -const NetworkPanels: Array = [ - { - title: 'Disk Percent', - targets: [ - { - expr: 'sum(disk_percent{networkID=~"$networkID"}) by (networkID)', - legendFormat: '{{networkID}}', - }, - ], - }, - { - title: 'Number of Connected UEs', - targets: [ - { - expr: 'sum(ue_connected{networkID=~"$networkID"}) by (networkID)', - legendFormat: '{{networkID}}', - }, - ], - }, - { - title: 'Number of Registered UEs', - targets: [ - { - expr: 'sum(ue_registered{networkID=~"$networkID"}) by (networkID)', - legendFormat: '{{networkID}}', - }, - ], - }, - { - title: 'Number of Connected eNBs', - targets: [ - { - expr: 'sum(enb_connected{networkID=~"$networkID"}) by (networkID)', - legendFormat: '{{networkID}}', - }, - ], - }, - { - title: 'S1 Setup', - targets: [ - { - expr: 'sum(s1_setup{networkID=~"$networkID"}) by (networkID)', - legendFormat: 'Total: {{networkID}}', - }, - { - expr: - 'sum(s1_setup{networkID=~"$networkID",result="success"}) by (networkID)', - legendFormat: 'Success: {{networkID}}', - }, - { - expr: - 'sum(s1_setup{networkID=~"$networkID"})by(networkID)-sum(s1_setup{result="success",networkID=~"$networkID"}) by (networkID)', - legendFormat: 'Failure: {{networkID}}', - }, - ], - }, - { - title: 'Attach/Reg Attempts', - targets: [ - { - expr: 'sum(ue_attach{networkID=~"$networkID"}) by (networkID)', - legendFormat: 'Total {{networkID}}', - }, - { - expr: - 'sum(ue_attach{networkID=~"$networkID",result="attach_proc_successful"}) by (networkID)', - legendFormat: 'Success: {{networkID}}', - }, - { - expr: - 'sum(ue_attach{networkID=~"$networkID"}) by (networkID) -sum(s1_setup{result="attach_proc_successful",networkID=~"$networkID"}) by (networkID)', - legendFormat: 'Failure: {{networkID}}', - }, - ], - }, - { - title: 'Detach/Dereg Attempts', - targets: [ - { - expr: 'sum(ue_detach{networkID=~"$networkID"}) by (networkID)', 
- legendFormat: 'Total: {{networkID}}', - }, - { - expr: - 'sum(ue_detach{networkID=~"$networkID",result="attach_proc_successful"}) by (networkID)', - legendFormat: 'Success: {{networkID}}', - }, - { - expr: - 'sum(ue_detach{networkID=~"$networkID"}) by (networkID) -sum(s1_setup{result="attach_proc_successful",networkID=~"$networkID"}) by (networkID)', - legendFormat: 'Failure: {{networkID}}', - }, - ], - }, - { - title: 'GPS Connection Uptime', - targets: [ - { - expr: - 'avg(enodeb_gps_connected{networkID=~"$networkID"}) by (networkID)', - legendFormat: '{{networkID}}', - }, - ], - }, - { - title: 'Device Transmitting Status', - targets: [ - { - expr: - 'avg(enodeb_rf_tx_enabled{networkID=~"$networkID"}) by (networkID)', - legendFormat: '{{networkID}}', - }, - ], - }, - { - title: 'Service Requests', - targets: [ - { - expr: 'sum(service_request{networkID=~"$networkID"}) by (networkID)', - legendFormat: 'Total: {{networkID}}', - }, - { - expr: - 'sum(service_request{networkID=~"$networkID",result="success"}) by (networkID)', - legendFormat: 'Success: {{networkID}}', - }, - { - expr: - 'sum(service_request{networkID=~"$networkID"}) by (networkID)-sum(s1_setup{result="success",networkID=~"$networkID"}) by (networkID)', - legendFormat: 'Failure: {{networkID}}', - }, - ], - }, -]; - -const GatewayPanels: Array = [ - { - title: 'E-Node B Status', - targets: [ - { - expr: - 'enodeb_rf_tx_enabled{gatewayID=~"$gatewayID",networkID=~"$networkID"}', - legendFormat: '{{gatewayID}}', - }, - ], - }, - { - title: 'Connected Subscribers', - targets: [ - { - expr: 'ue_connected{gatewayID=~"$gatewayID",networkID=~"$networkID"}', - legendFormat: '{{gatewayID}}', - }, - ], - }, - { - title: 'Download Throughput', - targets: [ - { - expr: - 'pdcp_user_plane_bytes_dl{gatewayID=~"$gatewayID",service="enodebd",networkID=~"$networkID"}/1000', - legendFormat: '{{gatewayID}}', - }, - ], - }, - { - title: 'Upload Throughput', - targets: [ - { - expr: - 
'pdcp_user_plane_bytes_ul{gatewayID=~"$gatewayID",service="enodebd",networkID=~"$networkID"}/1000', - legendFormat: '{{gatewayID}}', - }, - ], - }, - { - title: 'Latency', - targets: [ - { - expr: - 'magmad_ping_rtt_ms{gatewayID=~"$gatewayID",networkID=~"$networkID"}', - legendFormat: '{{gatewayID}}', - }, - ], - }, - { - title: 'Gateway CPU %', - targets: [ - { - expr: 'cpu_percent{gatewayID=~"$gatewayID",networkID=~"$networkID"}', - legendFormat: '{{gatewayID}}', - }, - ], - }, - { - title: 'Temperature (℃)', - targets: [ - { - expr: 'temperature{gatewayID=~"$gatewayID",networkID=~"$networkID"}', - legendFormat: '{{gatewayID}} - {{sensor}}', - }, - ], - }, - { - title: 'Disk %', - targets: [ - { - expr: 'disk_percent{gatewayID=~"$gatewayID",networkID=~"$networkID"}', - legendFormat: '{{gatewayID}}', - }, - ], - }, - { - title: 's6a Auth Failure', - targets: [ - { - expr: - 's6a_auth_failure{gatewayID=~"$gatewayID",networkID=~"$networkID"}', - legendFormat: '{{gatewayID}}', - }, - ], - }, -]; - -const InternalPanels: Array = [ - { - title: 'Memory Utilization', - targets: [ - { - expr: - 'mem_free{gatewayID=~"$gatewayID"}/mem_total{gatewayID=~"$gatewayID",networkID=~"$networkID"}', - legendFormat: '{{gatewayID}}', - }, - ], - }, - { - title: 'Temperature', - targets: [ - { - expr: - 'temperature{gatewayID=~"$gatewayID",sensor="coretemp_0",networkID=~"$networkID"}', - legendFormat: '{{gatewayID}} - {{sensor}}', - }, - ], - }, - { - title: 'Virtual Memory', - targets: [ - { - expr: - 'virtual_memory_percent{gatewayID=~"$gatewayID",networkID=~"$networkID"}', - legendFormat: '{{gatewayID}}', - }, - ], - }, - { - title: 'Backhaul Latency', - targets: [ - { - expr: - 'magmad_ping_rtt_ms{gatewayID=~"$gatewayID",service="magmad",host="8.8.8.8",metric="rtt_ms",networkID=~"$networkID"}', - legendFormat: '{{gatewayID}}', - }, - ], - }, - { - title: 'System Uptime', - targets: [ - { - expr: - 'process_uptime_seconds{gatewayID=~"$gatewayID",networkID=~"$networkID"}', - 
legendFormat: '{{gatewayID}}', - }, - ], - }, - { - title: 'Number of Service Restarts', - targets: [ - { - expr: - 'unexpected_service_restarts{gatewayID=~"$gatewayID",networkID=~"$networkID"}', - legendFormat: '{{gatewayID}}', - }, - ], - }, -]; - -export function NetworksDashboard() { - const row = new Grafana.Row({title: ''}); - NetworkPanels.forEach(conf => { - row.addPanel(newPanel(conf)); - }); - const db = new Grafana.Dashboard({ - schemaVersion: 6, - title: 'Networks', - templating: [networkTemplate()], - rows: [row], - editable: false, - }); - db.state.editable = false; - db.state.description = - 'Metrics relevant to the whole network. Do not edit: edits will be overwritten. Save this dashboard under another name to copy and edit.'; - return db; -} - -export function GatewaysDashboard() { - const row = new Grafana.Row({title: ''}); - GatewayPanels.forEach(conf => { - row.addPanel(newPanel(conf)); - }); - const db = new Grafana.Dashboard({ - schemaVersion: 6, - title: 'Gateways', - templating: [networkTemplate(), gatewayTemplate()], - rows: [row], - editable: false, - }); - db.state.editable = false; - db.state.description = - 'Metrics relevant to the gateways. Do not edit: edits will be overwritten. Save this dashboard under another name to copy and edit.'; - return db; -} - -export function InternalDashboard() { - const row = new Grafana.Row({title: ''}); - InternalPanels.forEach(conf => { - row.addPanel(newPanel(conf)); - }); - const db = new Grafana.Dashboard({ - schemaVersion: 6, - title: 'Internal', - templating: [networkTemplate(), gatewayTemplate()], - rows: [row], - }); - db.state.editable = false; - db.state.description = - 'Metrics relevant to the internals of gateways. Do not edit: edits will be overwritten. 
Save this dashboard under another name to copy and edit.'; - return db; -} - -type PanelParams = { - title: string, - targets: Array<{expr: string, legendFormat?: string}>, -}; - -function newPanel(params: PanelParams) { - const pan = new Grafana.Panels.Graph({ - title: params.title, - span: 6, - datasource: 'default', - }); - // Have to add this after to avoid grafana-dash-gen from forcing the target - // into a Graphite format - pan.state.targets = params.targets; - return pan; -} - -type TemplateParams = { - labelName: string, - query: string, - regex: string, -}; - -function variableTemplate(params: TemplateParams): TemplateConfig { - return { - allValue: '.+', - definition: params.query, - hide: 0, - includeAll: true, - allFormat: 'glob', - multi: true, - name: params.labelName, - query: params.query, - regex: params.regex, - type: 'query', - refresh: true, - useTags: false, - }; -} - -function networkTemplate(): TemplateConfig { - return variableTemplate({ - labelName: netIDVar, - query: `label_values(${netIDVar})`, - regex: `/.+/`, - }); -} - -// This templating schema will produce a variable in the dashboard -// named gatewayID which is a multi-selectable option of all the -// gateways associated with this organization that exist for the -// currently selected $networkID. 
$networkID variable must also -// be configured for this dashboard in order for it to work -function gatewayTemplate(): TemplateConfig { - return variableTemplate({ - labelName: gwIDVar, - query: `label_values({networkID=~"$networkID",gatewayID=~".+"}, ${gwIDVar})`, - regex: `/.+/`, - }); -} - -type TemplateConfig = { - allValue: string, - definition: string, - hide: number, - includeAll: boolean, - allFormat: string, - multi: boolean, - name: string, - query: string, - regex: string, - type: string, - refresh: boolean, - useTags: boolean, -}; diff --git a/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/handlers.js b/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/handlers.js index 65436f2b8ce1..7653f6c927b9 100644 --- a/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/handlers.js +++ b/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/handlers.js @@ -11,11 +11,6 @@ import {isEqual, sortBy} from 'lodash'; import MagmaV1API from '@fbcnms/platform-server/magma/index'; -import { - GatewaysDashboard, - InternalDashboard, - NetworksDashboard, -} from './dashboards/Dashboards'; import {Organization} from '@fbcnms/sequelize-models'; import {apiCredentials} from '../config'; @@ -423,76 +418,6 @@ export async function syncTenants(): Promise<{ return {completedTasks}; } -export async function syncDashboards( - client: GrafanaClient, - req: FBCNMSRequest, -): Promise<{ - completedTasks: Array, - errorTask?: Task, -}> { - const completedTasks: Array = []; - const grafanaOrgID = await getUserGrafanaOrgID(client, req.user); - - const org = await Organization.findOne({ - where: { - name: req.user.organization || '', - }, - }); - let networks: Array = []; - if (org) { - networks = org.networkIDs; - } - if (networks.length === 0) { - return { - completedTasks, - errorTask: { - name: `Finding Organization's networks`, - status: 500, - message: 'Unable to get the networks of an organization', - }, - }; - } - - const networksDB = 
NetworksDashboard().generate(); - const gatewaysDB = GatewaysDashboard().generate(); - const internalDB = InternalDashboard().generate(); - const posts = [ - { - dashboard: networksDB, - folderId: 0, - overwrite: true, - message: '', - }, - { - dashboard: gatewaysDB, - folderId: 0, - overwrite: true, - message: '', - }, - { - dashboard: internalDB, - folderId: 0, - overwrite: true, - message: '', - }, - ]; - - for (const post of posts) { - const createDBResp = await client.createDashboard(post, grafanaOrgID); - if (createDBResp.status !== 200) { - return { - completedTasks, - errorTask: { - name: 'Create Networks Dashboard', - status: createDBResp.status, - message: createDBResp.data, - }, - }; - } - } - return {completedTasks}; -} - export function makeGrafanaUsername(userID: number): string { return `NMSUser_${userID}`; } diff --git a/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/routes.js b/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/routes.js index b397afdab677..605a9ce2ee3c 100644 --- a/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/routes.js +++ b/symphony/app/fbcnms-packages/fbcnms-platform-server/grafana/routes.js @@ -18,7 +18,6 @@ import Client from './GrafanaAPI'; import GrafanaErrorMessage from './GrafanaErrorMessage'; import { makeGrafanaUsername, - syncDashboards, syncDatasource, syncGrafanaUser, syncTenants, @@ -66,12 +65,6 @@ const syncGrafana = () => { tenantsRes.errorTask, ); } - // Create Dashboards - const dbRes = await syncDashboards(grafanaAdminClient, req); - tasksCompleted.push(...dbRes.completedTasks); - if (dbRes.errorTask) { - return await displayErrorMessage(res, tasksCompleted, dbRes.errorTask); - } return next(); }; }; diff --git a/symphony/app/fbcnms-projects/inventory/app/common/PropertyType.js b/symphony/app/fbcnms-projects/inventory/app/common/PropertyType.js index 6fa6371c758c..dec69bfba061 100644 --- a/symphony/app/fbcnms-projects/inventory/app/common/PropertyType.js +++ 
b/symphony/app/fbcnms-projects/inventory/app/common/PropertyType.js @@ -16,6 +16,7 @@ export type PropertyType = {| type: PropertyKind, name: string, index: number, + category?: ?string, // one or more of the following potential value fields will have actual data, // depending on the property type selected for this property. // e.g. for 'email' the stringValue field will be populated diff --git a/symphony/app/fbcnms-projects/inventory/app/components/FilterBookmark.js b/symphony/app/fbcnms-projects/inventory/app/components/FilterBookmark.js index ac3629e31303..1973c7e1991a 100644 --- a/symphony/app/fbcnms-projects/inventory/app/components/FilterBookmark.js +++ b/symphony/app/fbcnms-projects/inventory/app/components/FilterBookmark.js @@ -9,23 +9,33 @@ */ import type { - EntityType, - FiltersQuery, -} from './comparison_view/ComparisonViewTypes'; + AddReportFilterMutationResponse, + AddReportFilterMutationVariables, + FilterEntity, + FilterOperator, +} from '../mutations/__generated__/AddReportFilterMutation.graphql'; +import type {FiltersQuery} from './comparison_view/ComparisonViewTypes'; +import type {MutationCallbacks} from '../mutations/MutationCallbacks.js'; +import type {WithSnackbarProps} from 'notistack'; import * as React from 'react'; +import AddReportFilterMutation from '../mutations/AddReportFilterMutation'; import BookmarksIcon from '@material-ui/icons/Bookmarks'; import BookmarksOutlinedIcon from '@material-ui/icons/BookmarksOutlined'; import Button from '@fbcnms/ui/components/design-system/Button'; +import CircularProgress from '@material-ui/core/CircularProgress'; import DialogActions from '@material-ui/core/DialogActions'; import Popover from '@material-ui/core/Popover'; +import SnackbarItem from '@fbcnms/ui/components/SnackbarItem'; import Strings from '../common/CommonStrings'; import Text from '@fbcnms/ui/components/design-system/Text'; import TextInput from '@fbcnms/ui/components/design-system/Input/TextInput'; import fbt from 'fbt'; +import 
nullthrows from '@fbcnms/util/nullthrows'; import symphony from '../../../../fbcnms-packages/fbcnms-ui/theme/symphony'; import {makeStyles} from '@material-ui/styles'; +import {withSnackbar} from 'notistack'; const useStyles = makeStyles(() => ({ filledBookmarkButton: { @@ -58,10 +68,10 @@ const useStyles = makeStyles(() => ({ margin: '8px 2px', }, })); -type Props = { +type Props = WithSnackbarProps & { isBookmark: boolean, filters: FiltersQuery, - entity: EntityType, + entity: FilterEntity, }; const FilterBookmark = (props: Props) => { @@ -69,6 +79,8 @@ const FilterBookmark = (props: Props) => { const classes = useStyles(); const [anchorEl, setAnchorEl] = React.useState(null); const [name, setName] = React.useState(''); + const [saving, setSaving] = React.useState(false); + const [bookmarked, setBookmarked] = React.useState(isBookmark); const handleClick = event => { setAnchorEl(event.currentTarget); @@ -81,13 +93,92 @@ const FilterBookmark = (props: Props) => { const open = Boolean(anchorEl); const saveFilter = () => { + saveFilterReport(); handleClose(); }; + const toOperator = (op: string): FilterOperator => { + switch (op) { + case 'is': + return 'IS'; + case 'contains': + return 'CONTAINS'; + case 'date_greater_than': + return 'DATE_GREATER_THAN'; + case 'date_less_than': + return 'DATE_LESS_THAN'; + case 'is_not_one_of': + return 'IS_NOT_ONE_OF'; + case 'is_one_of': + return 'IS_ONE_OF'; + } + throw new Error(`Operator ${op} is not supported`); + }; + + const saveFilterReport = () => { + setSaving(true); + const filterInput = props.filters.map(f => { + if ( + f.propertyValue && + (!f.propertyValue?.name || !f.propertyValue?.type) + ) { + throw new Error(`Property is not supported`); + } + return { + filterType: f.name.toUpperCase(), + operator: toOperator(f.operator), + stringValue: f.stringValue, + idSet: f.idSet, + stringSet: f.stringSet, + boolValue: f.boolValue, + propertyValue: f.propertyValue + ? 
{ + ...f.propertyValue, + name: nullthrows(f.propertyValue?.name), + type: nullthrows(f.propertyValue?.type), + } + : null, + }; + }); + const variables: AddReportFilterMutationVariables = { + input: { + name: name, + entity: props.entity, + filters: filterInput, + }, + }; + const callbacks: MutationCallbacks = { + onCompleted: (response, errors) => { + setSaving(false); + setBookmarked(true); + if (errors && errors[0]) { + props.enqueueSnackbar(errors[0].message, { + children: key => ( + + ), + }); + } + }, + onError: (error: Error) => { + setSaving(false); + props.enqueueSnackbar(error.message, { + children: key => ( + + ), + }); + }, + }; + AddReportFilterMutation(variables, callbacks); + }; + return ( <> - - - + <> + {saving ? ( + + ) : ( +
+
+ + SAVE SEARCH + +
+ setName(target.value)} + value={name} + /> + + + + +
+ )} + ); }; -export default FilterBookmark; +export default withSnackbar(FilterBookmark); diff --git a/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/ComparisonViewTypes.js b/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/ComparisonViewTypes.js index d3eb27436779..d101b8d3fad2 100644 --- a/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/ComparisonViewTypes.js +++ b/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/ComparisonViewTypes.js @@ -30,6 +30,7 @@ export const OperatorMap = Object.freeze({ is: 'is', contains: 'contains', is_one_of: 'is_one_of', + is_not_one_of: 'is_not_one_of', date_greater_than: 'date_greater_than', date_less_than: 'date_less_than', }); diff --git a/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/EquipmentPowerSearchBar.js b/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/EquipmentPowerSearchBar.js index 2bddd1dda21c..953cefc2c10c 100644 --- a/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/EquipmentPowerSearchBar.js +++ b/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/EquipmentPowerSearchBar.js @@ -17,7 +17,6 @@ import PowerSearchBar from '../power_search/PowerSearchBar'; import React from 'react'; import useLocationTypes from './hooks/locationTypesHook'; import usePropertyFilters from './hooks/propertiesHook'; -import {EntityTypeMap} from './ComparisonViewTypes'; import {EquipmentCriteriaConfig} from './EquipmentSearchConfig'; import {LogEvents, ServerLogger} from '../../common/LoggingUtils'; import {buildPropertyFilterConfigs, getSelectedFilter} from './FilterUtils'; @@ -58,7 +57,7 @@ const EquipmentPowerSearchBar = (props: Props) => { searchConfig={EquipmentCriteriaConfig} filterConfigs={filterConfigs} footer={footer} - entity={EntityTypeMap.equipment} + entity="EQUIPMENT" /> ); }; diff --git 
a/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/LinksPowerSearchBar.js b/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/LinksPowerSearchBar.js index c844097aee3b..b90e535bd848 100644 --- a/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/LinksPowerSearchBar.js +++ b/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/LinksPowerSearchBar.js @@ -19,7 +19,6 @@ import PowerSearchBar from '../power_search/PowerSearchBar'; import React, {useContext} from 'react'; import useLocationTypes from './hooks/locationTypesHook'; import usePropertyFilters from './hooks/propertiesHook'; -import {EntityTypeMap} from './ComparisonViewTypes'; import {LinkCriteriaConfig} from './LinkSearchConfig'; import {LogEvents, ServerLogger} from '../../common/LoggingUtils'; import {buildPropertyFilterConfigs, getSelectedFilter} from './FilterUtils'; @@ -62,7 +61,7 @@ const LinksPowerSearchBar = (props: Props) => { filterConfigs={filterConfigs} footer={footer} exportPath={'/links'} - entity={EntityTypeMap.link} + entity="LINK" /> ); }; diff --git a/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/LocationsPowerSearchBar.js b/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/LocationsPowerSearchBar.js index 0cf5067a86ac..8e2b8d308978 100644 --- a/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/LocationsPowerSearchBar.js +++ b/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/LocationsPowerSearchBar.js @@ -18,7 +18,6 @@ import PowerSearchBar from '../power_search/PowerSearchBar'; import React from 'react'; import useLocationTypes from './hooks/locationTypesHook'; import usePropertyFilters from './hooks/propertiesHook'; -import {EntityTypeMap} from './ComparisonViewTypes'; import {LocationCriteriaConfig} from './LocationSearchConfig'; import {LogEvents, ServerLogger} from '../../common/LoggingUtils'; import 
{buildPropertyFilterConfigs, getSelectedFilter} from './FilterUtils'; @@ -57,7 +56,7 @@ const LocationsPowerSearchBar = (props: Props) => { filterConfigs={filterConfigs} footer={footer} exportPath={'/locations'} - entity={EntityTypeMap.location} + entity="LOCATION" /> ); }; diff --git a/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/PortsPowerSearchBar.js b/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/PortsPowerSearchBar.js index 48ffc2a8d6ab..406afb48bec5 100644 --- a/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/PortsPowerSearchBar.js +++ b/symphony/app/fbcnms-projects/inventory/app/components/comparison_view/PortsPowerSearchBar.js @@ -18,7 +18,6 @@ import PowerSearchBar from '../power_search/PowerSearchBar'; import React from 'react'; import useLocationTypes from './hooks/locationTypesHook'; import usePropertyFilters from './hooks/propertiesHook'; -import {EntityTypeMap} from './ComparisonViewTypes'; import {LogEvents, ServerLogger} from '../../common/LoggingUtils'; import {PortCriteriaConfig} from './PortSearchConfig'; import {buildPropertyFilterConfigs, getSelectedFilter} from './FilterUtils'; @@ -58,7 +57,7 @@ const PortsPowerSearchBar = (props: Props) => { filterConfigs={filterConfigs} footer={footer} exportPath={'/ports'} - entity={EntityTypeMap.port} + entity="PORT" /> ); }; diff --git a/symphony/app/fbcnms-projects/inventory/app/components/power_search/PowerSearchBar.js b/symphony/app/fbcnms-projects/inventory/app/components/power_search/PowerSearchBar.js index 3adf416f7582..fbd8cdd5c2dc 100644 --- a/symphony/app/fbcnms-projects/inventory/app/components/power_search/PowerSearchBar.js +++ b/symphony/app/fbcnms-projects/inventory/app/components/power_search/PowerSearchBar.js @@ -10,12 +10,13 @@ import type { EntityConfig, - EntityType, FilterConfig, FilterValue, FiltersQuery, } from '../comparison_view/ComparisonViewTypes'; +import type {FilterEntity} from 
'../../mutations/__generated__/AddReportFilterMutation.graphql'; + import * as React from 'react'; import AppContext from '@fbcnms/ui/context/AppContext'; import CSVFileExport from '../CSVFileExport'; @@ -92,7 +93,7 @@ type Props = { onFilterBlurred?: (filter: FilterValue) => void, // used when a filter is selected from filter typeahead getSelectedFilter: (filterConfig: FilterConfig) => FilterValue, - entity?: EntityType, + entity?: FilterEntity, }; const PowerSearchBar = (props: Props) => { @@ -113,8 +114,10 @@ const PowerSearchBar = (props: Props) => { const [editingFilterIndex, setEditingFilterIndex] = useState((null: ?number)); const [isInputFocused, setIsInputFocused] = useState(false); + const [isBookmark, setIsBookmark] = useState(false); const onFilterValueChanged = (index: number, filterValue: FilterValue) => { + setIsBookmark(false); setFilterValues([ ...filterValues.slice(0, index), filterValue, @@ -127,6 +130,7 @@ const PowerSearchBar = (props: Props) => { const newFilterValues = update(filterValues, { $splice: [[index, 1]], }); + setIsBookmark(false); setFilterValues(newFilterValues); onFiltersChanged(newFilterValues); setEditingFilterIndex(null); @@ -145,6 +149,7 @@ const PowerSearchBar = (props: Props) => { const newFilterValues = update(filterValues, { [index]: {$set: filterValue}, }); + setIsBookmark(false); setFilterValues(newFilterValues); onFiltersChanged(newFilterValues); }; @@ -218,7 +223,7 @@ const PowerSearchBar = (props: Props) => { )} {savedSearch && entity && ( diff --git a/symphony/app/fbcnms-projects/inventory/app/components/services/ServiceComparisonView.js b/symphony/app/fbcnms-projects/inventory/app/components/services/ServiceComparisonView.js index 5a70f5dfe249..de58710da1fd 100644 --- a/symphony/app/fbcnms-projects/inventory/app/components/services/ServiceComparisonView.js +++ b/symphony/app/fbcnms-projects/inventory/app/components/services/ServiceComparisonView.js @@ -23,7 +23,6 @@ import symphony from '@fbcnms/ui/theme/symphony'; 
import useLocationTypes from '../comparison_view/hooks/locationTypesHook'; import usePropertyFilters from '../comparison_view/hooks/propertiesHook'; import useRouter from '@fbcnms/ui/hooks/useRouter'; -import {EntityTypeMap} from '../comparison_view/ComparisonViewTypes'; import {FormValidationContextProvider} from '@fbcnms/ui/components/design-system/Form/FormValidationContext'; import {ServiceSearchConfig} from './ServiceSearchConfig'; import { @@ -133,7 +132,7 @@ const ServiceComparisonView = () => { : `1 to ${count}` : null } - entity={EntityTypeMap.service} + entity={'SERVICE'} /> diff --git a/symphony/app/fbcnms-projects/inventory/app/mutations/AddReportFilterMutation.js b/symphony/app/fbcnms-projects/inventory/app/mutations/AddReportFilterMutation.js new file mode 100644 index 000000000000..495bba972c74 --- /dev/null +++ b/symphony/app/fbcnms-projects/inventory/app/mutations/AddReportFilterMutation.js @@ -0,0 +1,67 @@ +/** + * Copyright 2004-present Facebook. All Rights Reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + * + * @flow + * @format + */ + +import RelayEnvironemnt from '../common/RelayEnvironment.js'; +import {commitMutation, graphql} from 'react-relay'; +import type { + AddReportFilterMutation, + AddReportFilterMutationResponse, + AddReportFilterMutationVariables, +} from './__generated__/AddReportFilterMutation.graphql'; +import type {MutationCallbacks} from './MutationCallbacks.js'; +import type {StoreUpdater} from '../common/RelayEnvironment'; + +const mutation = graphql` + mutation AddReportFilterMutation($input: ReportFilterInput!) 
{ + addReportFilter(input: $input) { + id + name + entity + filters { + filterType + operator + stringValue + idSet + stringSet + boolValue + propertyValue { + id + name + type + isEditable + isInstanceProperty + stringValue + intValue + floatValue + booleanValue + latitudeValue + longitudeValue + rangeFromValue + rangeToValue + } + } + } + } +`; + +export default ( + variables: AddReportFilterMutationVariables, + callbacks?: MutationCallbacks, + updater?: StoreUpdater, +) => { + const {onCompleted, onError} = callbacks ? callbacks : {}; + commitMutation(RelayEnvironemnt, { + mutation, + variables, + updater, + onCompleted, + onError, + }); +}; diff --git a/symphony/app/fbcnms-projects/inventory/app/mutations/__generated__/AddReportFilterMutation.graphql.js b/symphony/app/fbcnms-projects/inventory/app/mutations/__generated__/AddReportFilterMutation.graphql.js new file mode 100644 index 000000000000..e96132822b30 --- /dev/null +++ b/symphony/app/fbcnms-projects/inventory/app/mutations/__generated__/AddReportFilterMutation.graphql.js @@ -0,0 +1,345 @@ +/** + * @generated + * Copyright 2004-present Facebook. All Rights Reserved. 
+ * + **/ + + /** + * @flow + * @relayHash 605bfb0dce4a13b5c64fcc4e18828c5a + */ + +/* eslint-disable */ + +'use strict'; + +/*:: +import type { ConcreteRequest } from 'relay-runtime'; +export type FilterEntity = "EQUIPMENT" | "LINK" | "LOCATION" | "PORT" | "SERVICE" | "WORK_ORDER" | "%future added value"; +export type FilterOperator = "CONTAINS" | "DATE_GREATER_THAN" | "DATE_LESS_THAN" | "IS" | "IS_NOT_ONE_OF" | "IS_ONE_OF" | "%future added value"; +export type PropertyKind = "bool" | "date" | "datetime_local" | "email" | "enum" | "equipment" | "float" | "gps_location" | "int" | "location" | "range" | "service" | "string" | "%future added value"; +export type ReportFilterInput = {| + name: string, + entity: FilterEntity, + filters?: ?$ReadOnlyArray, +|}; +export type GeneralFilterInput = {| + filterType: string, + operator: FilterOperator, + stringValue?: ?string, + idSet?: ?$ReadOnlyArray, + stringSet?: ?$ReadOnlyArray, + boolValue?: ?boolean, + propertyValue?: ?PropertyTypeInput, +|}; +export type PropertyTypeInput = {| + id?: ?string, + name: string, + type: PropertyKind, + index?: ?number, + category?: ?string, + stringValue?: ?string, + intValue?: ?number, + booleanValue?: ?boolean, + floatValue?: ?number, + latitudeValue?: ?number, + longitudeValue?: ?number, + rangeFromValue?: ?number, + rangeToValue?: ?number, + isEditable?: ?boolean, + isInstanceProperty?: ?boolean, + isMandatory?: ?boolean, + isDeleted?: ?boolean, +|}; +export type AddReportFilterMutationVariables = {| + input: ReportFilterInput +|}; +export type AddReportFilterMutationResponse = {| + +addReportFilter: {| + +id: string, + +name: string, + +entity: FilterEntity, + +filters: $ReadOnlyArray<{| + +filterType: string, + +operator: FilterOperator, + +stringValue: ?string, + +idSet: ?$ReadOnlyArray, + +stringSet: ?$ReadOnlyArray, + +boolValue: ?boolean, + +propertyValue: ?{| + +id: string, + +name: string, + +type: PropertyKind, + +isEditable: ?boolean, + +isInstanceProperty: ?boolean, + 
+stringValue: ?string, + +intValue: ?number, + +floatValue: ?number, + +booleanValue: ?boolean, + +latitudeValue: ?number, + +longitudeValue: ?number, + +rangeFromValue: ?number, + +rangeToValue: ?number, + |}, + |}>, + |} +|}; +export type AddReportFilterMutation = {| + variables: AddReportFilterMutationVariables, + response: AddReportFilterMutationResponse, +|}; +*/ + + +/* +mutation AddReportFilterMutation( + $input: ReportFilterInput! +) { + addReportFilter(input: $input) { + id + name + entity + filters { + filterType + operator + stringValue + idSet + stringSet + boolValue + propertyValue { + id + name + type + isEditable + isInstanceProperty + stringValue + intValue + floatValue + booleanValue + latitudeValue + longitudeValue + rangeFromValue + rangeToValue + } + } + } +} +*/ + +const node/*: ConcreteRequest*/ = (function(){ +var v0 = [ + { + "kind": "LocalArgument", + "name": "input", + "type": "ReportFilterInput!", + "defaultValue": null + } +], +v1 = { + "kind": "ScalarField", + "alias": null, + "name": "id", + "args": null, + "storageKey": null +}, +v2 = { + "kind": "ScalarField", + "alias": null, + "name": "name", + "args": null, + "storageKey": null +}, +v3 = { + "kind": "ScalarField", + "alias": null, + "name": "stringValue", + "args": null, + "storageKey": null +}, +v4 = [ + { + "kind": "LinkedField", + "alias": null, + "name": "addReportFilter", + "storageKey": null, + "args": [ + { + "kind": "Variable", + "name": "input", + "variableName": "input" + } + ], + "concreteType": "ReportFilter", + "plural": false, + "selections": [ + (v1/*: any*/), + (v2/*: any*/), + { + "kind": "ScalarField", + "alias": null, + "name": "entity", + "args": null, + "storageKey": null + }, + { + "kind": "LinkedField", + "alias": null, + "name": "filters", + "storageKey": null, + "args": null, + "concreteType": "GeneralFilter", + "plural": true, + "selections": [ + { + "kind": "ScalarField", + "alias": null, + "name": "filterType", + "args": null, + "storageKey": null + }, 
+ { + "kind": "ScalarField", + "alias": null, + "name": "operator", + "args": null, + "storageKey": null + }, + (v3/*: any*/), + { + "kind": "ScalarField", + "alias": null, + "name": "idSet", + "args": null, + "storageKey": null + }, + { + "kind": "ScalarField", + "alias": null, + "name": "stringSet", + "args": null, + "storageKey": null + }, + { + "kind": "ScalarField", + "alias": null, + "name": "boolValue", + "args": null, + "storageKey": null + }, + { + "kind": "LinkedField", + "alias": null, + "name": "propertyValue", + "storageKey": null, + "args": null, + "concreteType": "PropertyType", + "plural": false, + "selections": [ + (v1/*: any*/), + (v2/*: any*/), + { + "kind": "ScalarField", + "alias": null, + "name": "type", + "args": null, + "storageKey": null + }, + { + "kind": "ScalarField", + "alias": null, + "name": "isEditable", + "args": null, + "storageKey": null + }, + { + "kind": "ScalarField", + "alias": null, + "name": "isInstanceProperty", + "args": null, + "storageKey": null + }, + (v3/*: any*/), + { + "kind": "ScalarField", + "alias": null, + "name": "intValue", + "args": null, + "storageKey": null + }, + { + "kind": "ScalarField", + "alias": null, + "name": "floatValue", + "args": null, + "storageKey": null + }, + { + "kind": "ScalarField", + "alias": null, + "name": "booleanValue", + "args": null, + "storageKey": null + }, + { + "kind": "ScalarField", + "alias": null, + "name": "latitudeValue", + "args": null, + "storageKey": null + }, + { + "kind": "ScalarField", + "alias": null, + "name": "longitudeValue", + "args": null, + "storageKey": null + }, + { + "kind": "ScalarField", + "alias": null, + "name": "rangeFromValue", + "args": null, + "storageKey": null + }, + { + "kind": "ScalarField", + "alias": null, + "name": "rangeToValue", + "args": null, + "storageKey": null + } + ] + } + ] + } + ] + } +]; +return { + "kind": "Request", + "fragment": { + "kind": "Fragment", + "name": "AddReportFilterMutation", + "type": "Mutation", + "metadata": null, 
+ "argumentDefinitions": (v0/*: any*/), + "selections": (v4/*: any*/) + }, + "operation": { + "kind": "Operation", + "name": "AddReportFilterMutation", + "argumentDefinitions": (v0/*: any*/), + "selections": (v4/*: any*/) + }, + "params": { + "operationKind": "mutation", + "name": "AddReportFilterMutation", + "id": null, + "text": "mutation AddReportFilterMutation(\n $input: ReportFilterInput!\n) {\n addReportFilter(input: $input) {\n id\n name\n entity\n filters {\n filterType\n operator\n stringValue\n idSet\n stringSet\n boolValue\n propertyValue {\n id\n name\n type\n isEditable\n isInstanceProperty\n stringValue\n intValue\n floatValue\n booleanValue\n latitudeValue\n longitudeValue\n rangeFromValue\n rangeToValue\n }\n }\n }\n}\n", + "metadata": {} + } +}; +})(); +// prettier-ignore +(node/*: any*/).hash = '990637d691b7e35198b5c445629f8990'; +module.exports = node; diff --git a/symphony/app/fbcnms-projects/platform-server/scripts/createOrganization.js b/symphony/app/fbcnms-projects/platform-server/scripts/createOrganization.js index e34931802233..49e55206934e 100644 --- a/symphony/app/fbcnms-projects/platform-server/scripts/createOrganization.js +++ b/symphony/app/fbcnms-projects/platform-server/scripts/createOrganization.js @@ -90,7 +90,7 @@ function main() { const validTabs = getProjectTabs(); const tabs = args[1].split(',').map(tab => coerceToTab(tab)); - const invalidTabs = difference(tabs, validTabs).join(', '); + const invalidTabs = difference(tabs, validTabs.map(tab => tab.id)).join(', '); if (invalidTabs) { console.log( `tab should be one of: ${validTabs.join(', ')}. 
Got: ${invalidTabs}`, diff --git a/symphony/app/package.json b/symphony/app/package.json index 6cee4ee7ead2..c42322489cf9 100644 --- a/symphony/app/package.json +++ b/symphony/app/package.json @@ -42,7 +42,6 @@ "eslint-plugin-react-hooks": "^2.0.1", "eslint-plugin-relay": "^1.5.0", "eslint-plugin-sort-imports-es6-autofix": "^0.5.0", - "grafana-dash-gen": "^3.0.0", "jest": "^24.9.0", "prettier": "1.18.2", "regenerator-runtime": "^0.13.2", diff --git a/symphony/app/yarn.lock b/symphony/app/yarn.lock index 8eeef68ba60b..fc69bff936b8 100644 --- a/symphony/app/yarn.lock +++ b/symphony/app/yarn.lock @@ -6714,13 +6714,6 @@ error@7.0.2, error@^7.0.0: string-template "~0.2.1" xtend "~4.0.0" -error@^7.0.2: - version "7.2.1" - resolved "https://registry.yarnpkg.com/error/-/error-7.2.1.tgz#eab21a4689b5f684fc83da84a0e390de82d94894" - integrity sha512-fo9HBvWnx3NGUKMvMwB/CBCMMrfEJgbDTVDEkPygA3Bdd3lM1OyCd+rbQ8BwnpF6GdVeOLDNmyL4N5Bg80ZvdA== - dependencies: - string-template "~0.2.1" - es-abstract@^1.17.0, es-abstract@^1.17.0-next.0, es-abstract@^1.17.0-next.1, es-abstract@^1.17.2: version "1.17.4" resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.17.4.tgz#e3aedf19706b20e7c2594c35fc0d57605a79e184" @@ -8346,17 +8339,6 @@ graceful-fs@^4.1.11, graceful-fs@^4.1.15, graceful-fs@^4.1.2, graceful-fs@^4.1.6 resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.3.tgz#4a12ff1b60376ef09862c2093edd908328be8423" integrity sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ== -grafana-dash-gen@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/grafana-dash-gen/-/grafana-dash-gen-3.0.0.tgz#edecaa4d37c797cfd5c476de617bb3824dfc3435" - integrity sha512-JG6vbvvDRJOm8zIkWBrsrEon14eStbQ/L1nSOapeLyEmw3dPR3C1g+IbAP83xpNXvIIIh2dkLKANVklsznpoyA== - dependencies: - error "^7.0.2" - request "^2.88.0" - request-promise-native "^1.0.7" - underscore "^1.8.3" - xtend "^4.0.0" - "graphql@^14.0.0 | ^15.0.0-rc.1": version 
"15.0.0-rc.1" resolved "https://registry.yarnpkg.com/graphql/-/graphql-15.0.0-rc.1.tgz#246c5c02ee60d2fe2f670bf97847da9ad95e7e0c" @@ -8473,7 +8455,7 @@ har-schema@^2.0.0: resolved "https://registry.yarnpkg.com/har-schema/-/har-schema-2.0.0.tgz#a94c2224ebcac04782a0d9035521f24735b7ec92" integrity sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI= -har-validator@~5.1.0, har-validator@~5.1.3: +har-validator@~5.1.0: version "5.1.3" resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-5.1.3.tgz#1ef89ebd3e4996557675eed9893110dc350fa080" integrity sha512-sNvOCzEQNr/qrvJgc3UG/kD4QtlHycrzwS+6mfTrrSq97BvaYcPZZI1ZSqGSPR73Cxn4LKTD4PttRwfU7jWq5g== @@ -14540,13 +14522,6 @@ request-promise-core@1.1.2: dependencies: lodash "^4.17.11" -request-promise-core@1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/request-promise-core/-/request-promise-core-1.1.3.tgz#e9a3c081b51380dfea677336061fea879a829ee9" - integrity sha512-QIs2+ArIGQVp5ZYbWD5ZLCY29D5CfWizP8eWnm8FoGD1TX61veauETVQbrV60662V0oFBkrDOuaBI8XgtuyYAQ== - dependencies: - lodash "^4.17.15" - request-promise-native@^1.0.5: version "1.0.7" resolved "https://registry.yarnpkg.com/request-promise-native/-/request-promise-native-1.0.7.tgz#a49868a624bdea5069f1251d0a836e0d89aa2c59" @@ -14556,15 +14531,6 @@ request-promise-native@^1.0.5: stealthy-require "^1.1.1" tough-cookie "^2.3.3" -request-promise-native@^1.0.7: - version "1.0.8" - resolved "https://registry.yarnpkg.com/request-promise-native/-/request-promise-native-1.0.8.tgz#a455b960b826e44e2bf8999af64dff2bfe58cb36" - integrity sha512-dapwLGqkHtwL5AEbfenuzjTYg35Jd6KPytsC2/TLkVMz8rm+tNt72MGUWT1RP/aYawMpN6HqbNGBQaRcBtjQMQ== - dependencies: - request-promise-core "1.1.3" - stealthy-require "^1.1.1" - tough-cookie "^2.3.3" - "request@>=2.76.0 <3.0.0", request@^2.79.0, request@^2.87.0: version "2.88.0" resolved "https://registry.yarnpkg.com/request/-/request-2.88.0.tgz#9c2fca4f7d35b592efe57c7f0a55e81052124fef" @@ -14591,32 +14557,6 @@ request-promise-native@^1.0.7: 
tunnel-agent "^0.6.0" uuid "^3.3.2" -request@^2.88.0: - version "2.88.2" - resolved "https://registry.yarnpkg.com/request/-/request-2.88.2.tgz#d73c918731cb5a87da047e207234146f664d12b3" - integrity sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw== - dependencies: - aws-sign2 "~0.7.0" - aws4 "^1.8.0" - caseless "~0.12.0" - combined-stream "~1.0.6" - extend "~3.0.2" - forever-agent "~0.6.1" - form-data "~2.3.2" - har-validator "~5.1.3" - http-signature "~1.2.0" - is-typedarray "~1.0.0" - isstream "~0.1.2" - json-stringify-safe "~5.0.1" - mime-types "~2.1.19" - oauth-sign "~0.9.0" - performance-now "^2.1.0" - qs "~6.5.2" - safe-buffer "^5.1.2" - tough-cookie "~2.5.0" - tunnel-agent "^0.6.0" - uuid "^3.3.2" - require-directory@^2.1.1: version "2.1.1" resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" @@ -16452,7 +16392,7 @@ touch@^3.1.0: dependencies: nopt "~1.0.10" -tough-cookie@^2.3.3, tough-cookie@^2.3.4, tough-cookie@~2.5.0: +tough-cookie@^2.3.3, tough-cookie@^2.3.4: version "2.5.0" resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.5.0.tgz#cd9fb2a0aa1d5a12b473bd9fb96fa3dcff65ade2" integrity sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g== @@ -16640,11 +16580,6 @@ undefsafe@^2.0.2: dependencies: debug "^2.2.0" -underscore@^1.8.3: - version "1.9.2" - resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.9.2.tgz#0c8d6f536d6f378a5af264a72f7bec50feb7cf2f" - integrity sha512-D39qtimx0c1fI3ya1Lnhk3E9nONswSKhnffBI0gME9C99fYOkNi04xs8K6pePLhvl1frbDemkaBQ5ikWllR2HQ== - unfetch@^4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/unfetch/-/unfetch-4.1.0.tgz#6ec2dd0de887e58a4dee83a050ded80ffc4137db" diff --git a/symphony/cli/tests/pyinventory_tests/utils/base_test.py b/symphony/cli/tests/pyinventory_tests/utils/base_test.py index 95d25b7179b3..817cafab6bab 100644 --- 
a/symphony/cli/tests/pyinventory_tests/utils/base_test.py +++ b/symphony/cli/tests/pyinventory_tests/utils/base_test.py @@ -34,6 +34,11 @@ def _waitForPlatform(cls) -> None: response = requests.get(PLATFORM_SERVER_HEALTH_CHECK_URL, timeout=0.5) if response.status_code == 200: return - except Exception: + print( + f"Response failed with status code {response.status_code}" + f' and with message "{response.text}"' + ) + except Exception as e: + print(f"Request failed with exception {e}") time.sleep(0.5) raise Exception("Failed to wait for platform") diff --git a/symphony/docs/md/release-notes.md b/symphony/docs/md/release-notes.md new file mode 100644 index 000000000000..8abe16bb80de --- /dev/null +++ b/symphony/docs/md/release-notes.md @@ -0,0 +1,32 @@ +--- +id: release-notes +title: Release Notes +--- + +### Work In Progress + +The team is currently working on: +* Saved Searches: Ability to save past searches as bookmarks +* Mobile app: View work orders and site surveys in the mobile app +* Permission model: Customizeable infrastructure to enable roles and policies +* API documentation: Improving documentation for existing APIs +* Check lists for work orders: Enable check list items for work orders +* Enable SSO integration via Keycloak + + +### Release Notes +* 3/8/2020 + * Bug fixes + * **Validations on graphql**: several data validation were happening on the UI and not checked when calling directly the GraphQL API. Moved those validations to the GraphQL endpoint. 
+ * **UI fixes**: Improved User Experience + * Inventory + * **Adding warning before deletion**: When an equipment is being deleted, and this equipment has sub-equipments, warn the user that this deletion will delete more than 1 object + * Work Orders + * **Export**: Added "Export to CSV" option to Word Orders search view + * Infra + * **Subscriptions**: Send notifications via GraphQL subscriptions about changes to WO status + * **Safe changes to our GraphQL schema**: Block changes to GraphQL that are breaking previous versions from being pushed to production + * **Adding flow typing**: Improve the Flow coverage in UI files + * **Enhancing UI Design system**: Icons, Generic View Containers, Radio Groups, Tabs, Different Table views + * APIs + * **Pyinventory**: Added: edit equipment & port & link properties \ No newline at end of file diff --git a/symphony/docs/md/team-processes.md b/symphony/docs/md/team-processes.md new file mode 100644 index 000000000000..054cccd93f03 --- /dev/null +++ b/symphony/docs/md/team-processes.md @@ -0,0 +1,46 @@ +--- +id: team-processes +title: Internal Processes +--- + +### Release Process + +Symphony is using the continuous push methodology. This means that new code is pushed to production whenever changes are done, usually every 30 minutes. +This enables the Symphony team to move fast and react to partner requests in real time. Bugs are fixed in a matter of hours, and new requests are developed in a matter of days. + +The product is protected by numerous automated tests. Unit tests, integrations tests and UI E2E tests are all in place to block the push in case a major feature was broken. + +### SEV Process + +The Symphony team is taking any breakage in the product seriously. +Every week one team member is an “oncall” - responsible for the health of production. He is constantly fixing bugs, monitoring any report from our partners and improving the quality of the tool. +Whenever a serious problem occurs, we are opening a “SEV”. 
SEV is an incident report, that requires “all hands on deck”. It means that all of the team is focused on solving the issue ASAP. +The severity of the SEV is as follows: + +* SEV 3 + * A high priority feature is not working in prod (e.g. connect links, pyinventory) + * High number of intermittent failures +* SEV2 + * 1-2 prod partners are down + * Internal partner is down +* SEV1 + * "Production" is down (Inventory\WO is inaccessible for all partners) + * Data layer is inconsistent and partner's data is lost + +Our commitment towards fixing SEVs is as follows: +* SEV 3: Fix during regular business hours. +* SEV 2: Fix with reasonable after hours. Feel free to ping others or even wake up people until the problem is resolved. +* SEV 1: All hands on deck until fixed! Fix, even with unreasonable after hours. + +After the SEV is mitigated, the team is having a “postmortem” meeting to review the SEV. SEV timeline, cause, time to mitigate- all numbers are reviewed. The team is leaving this meeting with a set of tasks to do: add automated tests, improve code infrastructure, fix bugs, etc. + +After every SEV, our goal is not only to fix the problem, but also to fix the code in such a way that similar SEVs will never occur. + +### Deprecating APIs +If changes are done to the schema we will mark old endpoints as deprecated. +Here you will find the list of deprecated endpoints [list of deprecated endpoints](graphql-breaking-changes.md) + +Partners are expected to upgrade their code to the new version. +After months of Deprecated state we will delete old endpoints. 
+ +<< TBD more details >> \ No newline at end of file diff --git a/symphony/docs/website/sidebars.json b/symphony/docs/website/sidebars.json index 4756ccc83a1a..349592fb7a1a 100755 --- a/symphony/docs/website/sidebars.json +++ b/symphony/docs/website/sidebars.json @@ -1,15 +1,15 @@ { "wiki": { + "General": [ + "team-processes", + "release-notes" + ], "Inventory": [ "inventory-intro", "csv-upload", - "equipment-export", "service-export" ], - "NMS": [ - "nms-overview" - ], "Mobile App": [ "mobile-app-intro", "root-device" diff --git a/symphony/graph/cmd/entscript/main.go b/symphony/graph/cmd/entscript/main.go index 4cc52e62e63a..5a2b78282b3b 100644 --- a/symphony/graph/cmd/entscript/main.go +++ b/symphony/graph/cmd/entscript/main.go @@ -7,6 +7,7 @@ package main import ( "context" + "github.com/facebookincubator/ent/dialect/sql" "github.com/facebookincubator/symphony/graph/ent" "github.com/facebookincubator/symphony/graph/event" "github.com/facebookincubator/symphony/graph/graphql/generated" @@ -21,8 +22,8 @@ import ( func main() { kingpin.HelpFlag.Short('h') dsn := kingpin.Flag("db-dsn", "data source name").Envar("MYSQL_DSN").Required().String() - tenant := kingpin.Flag("tenant", "tenant name to target").Required().String() - user := kingpin.Flag("user", "user name to target").Required().String() + tenantName := kingpin.Flag("tenant", "tenant name to target. 
\"ALL\" for running on all tenants").Required().String() + u := kingpin.Flag("user", "user name to target").Required().String() logcfg := log.AddFlags(kingpin.CommandLine) kingpin.Parse() @@ -31,8 +32,8 @@ func main() { logger.For(ctx).Info("params", zap.Stringp("dsn", dsn), - zap.Stringp("tenant", tenant), - zap.Stringp("user", user), + zap.Stringp("tenant", tenantName), + zap.Stringp("user", u), ) tenancy, err := viewer.NewMySQLTenancy(*dsn) if err != nil { @@ -43,55 +44,98 @@ func main() { } mysql.SetLogger(logger) - v := &viewer.Viewer{Tenant: *tenant, User: *user} - ctx = log.NewFieldsContext(ctx, zap.Object("viewer", v)) - ctx = viewer.NewContext(ctx, v) - client, err := tenancy.ClientFor(ctx, *tenant) + driver, err := sql.Open("mysql", *dsn) if err != nil { - logger.For(ctx).Fatal("cannot get ent client for tenant", - zap.Stringp("tenant", tenant), + logger.For(ctx).Fatal("cannot connect sql database", + zap.Stringp("dsn", dsn), zap.Error(err), ) } - tx, err := client.Tx(ctx) + tenants, err := getTenantList(ctx, driver, tenantName) if err != nil { - logger.For(ctx).Fatal("cannot begin transaction", zap.Error(err)) + logger.For(ctx).Fatal("cannot get tenants to run on", + zap.Stringp("dsn", dsn), + zap.Stringp("tenant", tenantName), + zap.Error(err), + ) } - defer func() { - if r := recover(); r != nil { + + for _, tenant := range tenants { + v := &viewer.Viewer{Tenant: tenant, User: *u} + ctx := log.NewFieldsContext(ctx, zap.Object("viewer", v)) + ctx = viewer.NewContext(ctx, v) + client, err := tenancy.ClientFor(ctx, tenant) + if err != nil { + logger.For(ctx).Fatal("cannot get ent client for tenant", + zap.String("tenant", tenant), + zap.Error(err), + ) + } + + tx, err := client.Tx(ctx) + if err != nil { + logger.For(ctx).Fatal("cannot begin transaction", zap.Error(err)) + } + defer func() { + if r := recover(); r != nil { + if err := tx.Rollback(); err != nil { + logger.For(ctx).Error("cannot rollback transaction", zap.Error(err)) + } + 
logger.For(ctx).Panic("application panic", zap.Reflect("error", r)) + } + }() + + ctx = ent.NewContext(ctx, tx.Client()) + // Since the client is already uses transaction we can't have transactions on graphql also + r := resolver.New( + resolver.Config{ + Logger: logger, + Emitter: event.NewNopEmitter(), + Subscriber: event.NewNopSubscriber(), + }, + resolver.WithTransaction(false), + ) + + if err := utilityFunc(ctx, r, logger, tenant); err != nil { + logger.For(ctx).Error("failed to run function", zap.Error(err)) if err := tx.Rollback(); err != nil { logger.For(ctx).Error("cannot rollback transaction", zap.Error(err)) } - logger.For(ctx).Panic("application panic", zap.Reflect("error", r)) + return } - }() - ctx = ent.NewContext(ctx, tx.Client()) - // Since the client is already uses transaction we can't have transactions on graphql also - r := resolver.New( - resolver.Config{ - Logger: logger, - Emitter: event.NewNopEmitter(), - Subscriber: event.NewNopSubscriber(), - }, - resolver.WithTransaction(false), - ) - - if err := utilityFunc(ctx, r, logger); err != nil { - logger.For(ctx).Error("failed to run function", zap.Error(err)) - if err := tx.Rollback(); err != nil { - logger.For(ctx).Error("cannot rollback transaction", zap.Error(err)) + if err := tx.Commit(); err != nil { + logger.For(ctx).Error("cannot commit transaction", zap.Error(err)) + return } - return } +} - if err := tx.Commit(); err != nil { - logger.For(ctx).Error("cannot commit transaction", zap.Error(err)) +func getTenantList(ctx context.Context, driver *sql.Driver, tenant *string) ([]string, error) { + if *tenant != "ALL" { + return []string{*tenant}, nil + } + rows, err := driver.DB().QueryContext(ctx, + "SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME LIKE ?", viewer.DBName("%"), + ) + if err != nil { + return nil, err + } + defer rows.Close() + var tenants []string + for rows.Next() { + var name string + if err := rows.Scan(&name); err != nil { + return nil, err + } + name 
= viewer.FromDBName(name) + tenants = append(tenants, name) } + return tenants, nil } -func utilityFunc(_ context.Context, _ generated.ResolverRoot, _ log.Logger) error { +func utilityFunc(ctx context.Context, _ generated.ResolverRoot, logger log.Logger, tenant string) error { /** Add your Go code in this function You need to run this code from the same version production is at to avoid schema mismatches diff --git a/symphony/graph/graphhttp/router.go b/symphony/graph/graphhttp/router.go index 9d599914b1be..03b0a630b423 100644 --- a/symphony/graph/graphhttp/router.go +++ b/symphony/graph/graphhttp/router.go @@ -43,9 +43,6 @@ func newRouter(cfg routerConfig) (*mux.Router, func(), error) { func(h http.Handler) http.Handler { return viewer.TenancyHandler(h, cfg.viewer.tenancy) }, - func(h http.Handler) http.Handler { - return viewer.UserHandler(h, cfg.logger) - }, func(h http.Handler) http.Handler { return actions.Handler(h, cfg.logger, cfg.actions.registry) },