diff --git a/address_utils/address_utils.go b/address_utils/address_utils.go
index 4e8110b5c97a22ee853d6aa4e84615e58bd62712..bb58a469af0be7e44c48e0747b00aaf0168ee1d1 100644
--- a/address_utils/address_utils.go
+++ b/address_utils/address_utils.go
@@ -73,3 +73,70 @@ func stripUnwantedCharacters(s string) string {
 
 	return s
 }
+
+func CleanZone(oldCountry, oldZone *string) (newCountry, newZone *string) {
+	// Google zones
+	/*
+		"long_name": "KwaZulu-Natal",
+		"short_name": "KZN",
+
+		"long_name": "Gauteng",
+		"short_name": "GP",
+
+		"long_name": "Free State",
+		"short_name": "FS",
+
+		"long_name": "Limpopo",
+		"short_name": "LP",
+
+		"long_name": "Mpumalanga",
+		"short_name": "MP",
+
+		"long_name": "North West",
+		"short_name": "NW",
+
+		"long_name": "Western Cape",
+		"short_name": "WC",
+
+		"long_name": "Eastern Cape",
+		"short_name": "EC",
+
+		"long_name": "Northern Cape",
+		"short_name": "NC",
+	*/
+
+	newCountry = oldCountry
+	if oldCountry == nil || *oldCountry == "South Africa" || len(*oldCountry) == 0 {
+		defaultCountry := "ZA"
+		newCountry = &defaultCountry
+	}
+
+	if *newCountry == "ZA" && oldZone != nil {
+		zone := *oldZone
+		// Gauteng - GT from uAfrica should be GP for Google
+		if zone == "GT" {
+			zone = "GP"
+		} else if zone == "NT" {
+			zone = "KZN"
+		}
+
+		zone = string_utils.ReplaceCaseInsensitive(zone, "KwaZulu-Natal", "KZN")
+		zone = string_utils.ReplaceCaseInsensitive(zone, "KwaZulu Natal", "KZN")
+		zone = string_utils.ReplaceCaseInsensitive(zone, "Gauteng", "GP")
+		zone = string_utils.ReplaceCaseInsensitive(zone, "Freestate", "FS")
+		zone = string_utils.ReplaceCaseInsensitive(zone, "Free State", "FS")
+		zone = string_utils.ReplaceCaseInsensitive(zone, "Limpopo", "LP")
+		zone = string_utils.ReplaceCaseInsensitive(zone, "Mpumalanga", "MP")
+		zone = string_utils.ReplaceCaseInsensitive(zone, "North West", "NW")
+		zone = string_utils.ReplaceCaseInsensitive(zone, "Eastern Cape", "EC")
+		zone = string_utils.ReplaceCaseInsensitive(zone, "Western Cape", "WC")
+		zone = string_utils.ReplaceCaseInsensitive(zone, "Northern Cape", "NC")
+		zone = string_utils.ReplaceCaseInsensitive(zone, "Eastern-Cape", "EC")
+		zone = string_utils.ReplaceCaseInsensitive(zone, "Western-Cape", "WC")
+		zone = string_utils.ReplaceCaseInsensitive(zone, "Northern-Cape", "NC")
+
+		newZone = &zone
+	}
+
+	return
+}
diff --git a/api/README.md b/api/README.md
index a464c77100fefa86aa1fbef21ba3cdbeee40b810..1435a4923f7bcfe3e7200523bc42ea4eacc88135 100644
--- a/api/README.md
+++ b/api/README.md
@@ -204,9 +204,7 @@ API documentation will eventually be generated from doc tags in your structs. Th
 
 # API Logger
 
-API uses the go-utils/logger at the moment. Eventually it will be possible to use other loggers, and customise api logs. Audits can already be customised using ```api.WithAuditor()```.
-
-By default, go-utils/logger writes JSON records. At the start of the example api main function the logger is configured to write console format and log at DEBUG level... more to come on this front too including triggers for full debug in production on selected handlers or specific events, and logging different levels for selected code packages to avoid log clutter without having to remove debug from code.
+API logs are written via the package-level producer configured with `logs.Init()`, using `logs.LogIncomingAPIRequest()`
 
 # Router Path Parameters
 
diff --git a/api/api.go b/api/api.go
index 219cfa743d43c0f82ef1e4d728408e728eacfbaa..718cc6f74ed5e65c88f4c8cd31ed8c3882bc983d 100644
--- a/api/api.go
+++ b/api/api.go
@@ -7,9 +7,11 @@ import (
 
 	"github.com/aws/aws-lambda-go/lambda"
 	"gitlab.com/uafrica/go-utils/audit"
+	"gitlab.com/uafrica/go-utils/consumer/mem_consumer"
 	"gitlab.com/uafrica/go-utils/errors"
-	queues_mem "gitlab.com/uafrica/go-utils/queues/mem"
-	queues_sqs "gitlab.com/uafrica/go-utils/queues/sqs"
+	"gitlab.com/uafrica/go-utils/logs"
+	"gitlab.com/uafrica/go-utils/queues"
+	"gitlab.com/uafrica/go-utils/queues/sqs_producer"
 	"gitlab.com/uafrica/go-utils/service"
 	"gitlab.com/uafrica/go-utils/string_utils"
 )
@@ -52,19 +54,13 @@ type Api struct {
 }
 
 //wrap Service.WithStarter to return api, else cannot be chained
-func (api Api) WithStarter(name string, starter service.IStarter) Api {
+func (api Api) WithStarter(name string, starter service.Starter) Api {
 	api.Service = api.Service.WithStarter(name, starter)
 	return api
 }
 
 //wrap else cannot be chained
-func (api Api) WithAuditor(auditor audit.Auditor) Api {
-	api.Service = api.Service.WithAuditor(auditor)
-	return api
-}
-
-//wrap else cannot be chained
-func (api Api) WithProducer(producer service.Producer) Api {
+func (api Api) WithProducer(producer queues.Producer) Api {
 	api.Service = api.Service.WithProducer(producer)
 	return api
 }
@@ -123,15 +119,19 @@ func (api Api) WithEvents(eventHandlers map[string]interface{}) Api {
 //run and panic on error
 func (api Api) Run() {
 	//decide local or SQS
+	var producer queues.Producer
 	if (os.Getenv("LOG_LEVEL") == "debug") && api.localQueueEventHandlers != nil {
 		//use in-memory channels for async events
 		api.Debugf("Using in-memory channels for async events ...")
-		api = api.WithProducer(queues_mem.NewProducer(queues_mem.NewConsumer(api.Service, api.localQueueEventHandlers)))
+		producer = mem_consumer.NewProducer(mem_consumer.New(api.Service, api.localQueueEventHandlers))
 	} else {
 		//use SQS for async events
 		api.Debugf("Using SQS queue producer for async events ...")
-		api = api.WithProducer(queues_sqs.NewProducer(api.requestIDHeaderKey))
+		producer = sqs_producer.New(api.requestIDHeaderKey)
 	}
+	api = api.WithProducer(producer)
+	audit.Init(producer)
+	logs.Init(producer)
 
 	//run as an AWS Lambda function
 	lambda.Start(api.Handler)
diff --git a/api/context.go b/api/context.go
index eea6c5ca30dc49bf99117ef1869d2daab1dc9eb6..31253eb133f4e29e7b38b875bbd8fe56ded1adcf 100644
--- a/api/context.go
+++ b/api/context.go
@@ -1,15 +1,15 @@
 package api
 
 import (
-	"encoding/csv"
 	"encoding/json"
 	"reflect"
-	"strings"
 
 	"github.com/aws/aws-lambda-go/events"
 	"gitlab.com/uafrica/go-utils/errors"
+	"gitlab.com/uafrica/go-utils/logger"
 	"gitlab.com/uafrica/go-utils/reflection"
 	"gitlab.com/uafrica/go-utils/service"
+	"gitlab.com/uafrica/go-utils/struct_utils"
 )
 
 type Context interface {
@@ -20,8 +20,6 @@ type Context interface {
 	LogAPIRequestAndResponse(res events.APIGatewayProxyResponse, err error)
 }
 
-var contextInterfaceType = reflect.TypeOf((*Context)(nil)).Elem()
-
 type apiContext struct {
 	service.Context
 	request events.APIGatewayProxyRequest
@@ -59,8 +57,13 @@ func (ctx *apiContext) LogAPIRequestAndResponse(res events.APIGatewayProxyRespon
 //allocate struct for params, populate it from the URL parameters then validate and return the struct
 func (ctx apiContext) GetRequestParams(paramsStructType reflect.Type) (interface{}, error) {
 	paramsStructValuePtr := reflect.New(paramsStructType)
-	if err := ctx.setParamsInStruct("params", paramsStructType, paramsStructValuePtr.Elem()); err != nil {
-		return nil, errors.Wrapf(err, "failed to put query param values into struct")
+	nv := struct_utils.NamedValuesFromURL(ctx.request.QueryStringParameters, ctx.request.MultiValueQueryStringParameters)
+	unused, err := struct_utils.UnmarshalNamedValues(nv, paramsStructValuePtr.Interface())
+	if err != nil {
+		return nil, errors.Wrapf(err, "invalid parameters")
+	}
+	if len(unused) > 0 {
+		logger.Warnf("Unknown parameters: %+v", unused)
 	}
 	if err := ctx.applyClaim("params", paramsStructValuePtr.Interface()); err != nil {
 		return nil, errors.Wrapf(err, "failed to fill claims on params")
@@ -73,96 +76,6 @@ func (ctx apiContext) GetRequestParams(paramsStructType reflect.Type) (interface
 	return paramsStructValuePtr.Elem().Interface(), nil
 }
 
-//extract params into a struct value
-func (ctx apiContext) setParamsInStruct(name string, t reflect.Type, v reflect.Value) error {
-	for i := 0; i < t.NumField(); i++ {
-		tf := t.Field(i)
-		//enter into anonymous sub-structs
-		if tf.Anonymous {
-			if tf.Type.Kind() == reflect.Struct {
-				if err := ctx.setParamsInStruct(name+"."+tf.Name, t.Field(i).Type, v.Field(i)); err != nil {
-					return errors.Wrapf(err, "failed on parameters %s.%s", name, tf.Name)
-				}
-				continue
-			}
-			return errors.Errorf("parameters cannot parse into anonymous %s field %s", tf.Type.Kind(), tf.Type.Name())
-		}
-
-		//named field:
-		//use name from json tag, else lowercase of field name
-		n := (strings.SplitN(tf.Tag.Get("json"), ",", 2))[0]
-		if n == "" {
-			n = strings.ToLower(tf.Name)
-		}
-		if n == "" || n == "-" {
-			continue //skip fields without name
-		}
-
-		//see if this named param was specified
-		var paramStrValues []string
-		if paramStrValue, isDefined := ctx.request.QueryStringParameters[n]; isDefined {
-			//specified once in URL
-			if len(paramStrValue) >= 2 && paramStrValue[0] == '[' && paramStrValue[len(paramStrValue)-1] == ']' {
-				//specified as CSV inside [...] e.g. id=[1,2,3]
-				csvReader := csv.NewReader(strings.NewReader(paramStrValue[1 : len(paramStrValue)-1]))
-				var err error
-				paramStrValues, err = csvReader.Read()
-				if err != nil {
-					return errors.Wrapf(err, "invalid CSV: [%s]", paramStrValue)
-				}
-			} else {
-				//specified as single value only e.g. id=1
-				paramStrValues = []string{paramStrValue}
-			}
-		} else {
-			//specified multiple times e.g. id=1&id=2&id=3
-			paramStrValues = ctx.request.MultiValueQueryStringParameters[n]
-		}
-		if len(paramStrValues) == 0 {
-			continue //param has no value specified in URL
-		}
-
-		valueField := v.Field(i)
-		if valueField.Kind() == reflect.Ptr {
-			valueField.Set(reflect.New(valueField.Type().Elem()))
-			valueField = valueField.Elem()
-		}
-
-		//param is defined >=1 times in URL
-		if tf.Type.Kind() == reflect.Slice {
-			//this param struct field is a slice, iterate over all specified values
-			for i, paramStrValue := range paramStrValues {
-				paramValue, err := parseParamValue(paramStrValue, tf.Type.Elem())
-				if err != nil {
-					return errors.Wrapf(err, "invalid %s[%d]", n, i)
-				}
-				valueField.Set(reflect.Append(valueField, paramValue))
-			}
-		} else {
-			if len(paramStrValues) > 1 {
-				return errors.Errorf("parameter %s does not support multiple values [%s]", n, strings.Join(paramStrValues, ","))
-			}
-			//single value specified
-			paramValue, err := parseParamValue(paramStrValues[0], valueField.Type())
-			if err != nil {
-				return errors.Wrapf(err, "invalid %s", n)
-			}
-			valueField.Set(paramValue)
-		}
-	} //for each param struct field
-	return nil
-}
-
-func parseParamValue(s string, t reflect.Type) (reflect.Value, error) {
-	newValuePtr := reflect.New(t)
-	if err := json.Unmarshal([]byte("\""+s+"\""), newValuePtr.Interface()); err != nil {
-		if err := json.Unmarshal([]byte(s), newValuePtr.Interface()); err != nil {
-			return newValuePtr.Elem(), errors.Wrapf(err, "invalid \"%s\"", s)
-		}
-	}
-	return newValuePtr.Elem(), nil
-}
-
 func (ctx apiContext) GetRequestBody(requestStructType reflect.Type) (interface{}, error) {
 	requestStructValuePtr := reflect.New(requestStructType)
 	err := json.Unmarshal([]byte(ctx.request.Body), requestStructValuePtr.Interface())
diff --git a/api/handler.go b/api/handler.go
index f089e582b16407169e40dabae450ef73ebe573b9..5829cb2e8c13424359b0d6466442642f09cd1601 100644
--- a/api/handler.go
+++ b/api/handler.go
@@ -27,7 +27,7 @@ func NewHandler(fnc interface{}) (handler, error) {
 	//arg[0] must be a struct for params. It may be an empty struct, but
 	//all public fields require a json tag which we will use to math the URL param name
 	if err := validateStructType(fncType.In(0)); err != nil {
-		return h, errors.Wrapf(err, "second arg %v is not valid params struct type", fncType.In(0))
+		return h, errors.Wrapf(err, "first arg %v is not valid params struct type", fncType.In(0))
 	}
 	h.RequestParamsType = fncType.In(0)
 
diff --git a/api/lambda.go b/api/lambda.go
index 3d2479e19a8ff1b07802421cbae78317b94fdf1d..19edeae70990a2a3ce1df13b58f7af18159209c7 100644
--- a/api/lambda.go
+++ b/api/lambda.go
@@ -14,6 +14,7 @@ import (
 	"github.com/aws/aws-lambda-go/lambdacontext"
 	"gitlab.com/uafrica/go-utils/errors"
 	"gitlab.com/uafrica/go-utils/logger"
+	"gitlab.com/uafrica/go-utils/logs"
 )
 
 func (api Api) NewContext(baseCtx context.Context, requestID string, request events.APIGatewayProxyRequest) (Context, error) {
@@ -57,7 +58,6 @@ func (api Api) Handler(baseCtx context.Context, apiGatewayProxyReq events.APIGat
 	if err != nil {
 		return res, err
 	}
-	
 
 	//report handler crashes
 	if api.crashReporter != nil {
@@ -99,14 +99,8 @@ func (api Api) Handler(baseCtx context.Context, apiGatewayProxyReq events.APIGat
 		if api.requestIDHeaderKey != "" {
 			res.Headers[api.requestIDHeaderKey] = Ctx.RequestID()
 		}
-		if err := api.Service.WriteValues(Ctx.StartTime(), time.Now(), Ctx.RequestID(), map[string]interface{}{
-			"direction":  "incoming",
-			"type":       "api",
-			"request_id": Ctx.RequestID(),
-			"request":    Ctx.Request(),
-			"response":   res},
-		); err != nil {
-			Ctx.Errorf("failed to audit: %+v", err)
+		if err := logs.LogIncomingAPIRequest(Ctx.StartTime(), Ctx.RequestID(), Ctx.Claim(), apiGatewayProxyReq, res); err != nil {
+			Ctx.Errorf("failed to log: %+v", err)
 		}
 	}()
 
@@ -132,7 +126,7 @@ func (api Api) Handler(baseCtx context.Context, apiGatewayProxyReq events.APIGat
 		}
 	}
 
-	Ctx.Debugf("HTTP %s %s ...\n", apiGatewayProxyReq.HTTPMethod, apiGatewayProxyReq.Resource)
+	Ctx.Tracef("HTTP %s %s ...\n", apiGatewayProxyReq.HTTPMethod, apiGatewayProxyReq.Resource)
 	Ctx.WithFields(map[string]interface{}{
 		"http_method":                Ctx.Request().HTTPMethod,
 		"path":                       Ctx.Request().Path,
@@ -221,7 +215,7 @@ func (api Api) Handler(baseCtx context.Context, apiGatewayProxyReq events.APIGat
 		// 	return
 		// }
 
-		Ctx.Debugf("Body: (%T) %+v", bodyStruct, bodyStruct)
+		Ctx.Tracef("Body: (%T) %+v", bodyStruct, bodyStruct)
 		args = append(args, reflect.ValueOf(bodyStruct))
 	}
 
@@ -266,15 +260,19 @@ func (api Api) Handler(baseCtx context.Context, apiGatewayProxyReq events.APIGat
 	if len(results) > 1 {
 		responseStruct := results[0].Interface()
 		Ctx.Debugf("Response type: %T", responseStruct)
-
-		var bodyBytes []byte
-		bodyBytes, err = json.Marshal(responseStruct)
-		if err != nil {
-			err = errors.Wrapf(err, "failed to encode response content")
-			return
+		if responseString, ok := responseStruct.(string); ok {
+			res.Headers["Content-Type"] = "application/json"
+			res.Body = responseString
+		} else {
+			var bodyBytes []byte
+			bodyBytes, err = json.Marshal(responseStruct)
+			if err != nil {
+				err = errors.Wrapf(err, "failed to encode response content")
+				return
+			}
+			res.Headers["Content-Type"] = "application/json"
+			res.Body = string(bodyBytes)
 		}
-		res.Headers["Content-Type"] = "application/json"
-		res.Body = string(bodyBytes)
 	} else {
 		//no content
 		delete(res.Headers, "Content-Type")
diff --git a/audit/audit.go b/audit/audit.go
deleted file mode 100644
index 2fdf2bfc864d40cf2e791f941d5c49fe775b98dd..0000000000000000000000000000000000000000
--- a/audit/audit.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package audit
-
-import "time"
-
-type Auditor interface {
-	WriteValues(startTime, endTime time.Time, requestID string, values map[string]interface{}) error
-	WriteEvent(requestID string, event Event) error
-}
diff --git a/audit/event.go b/audit/change.go
similarity index 79%
rename from audit/event.go
rename to audit/change.go
index dc61743472d209b2f5d99e39027e908a7198d0e7..e1d9649f761fa17677e7388a4e515ca45575d609 100644
--- a/audit/event.go
+++ b/audit/change.go
@@ -9,26 +9,60 @@ import (
 	"time"
 
 	"github.com/r3labs/diff/v2"
+	"gitlab.com/uafrica/go-utils/errors"
+	"gitlab.com/uafrica/go-utils/queues"
 	"gitlab.com/uafrica/go-utils/reflection"
 )
 
-type Event struct {
+var producer queues.Producer
+
+func Init(p queues.Producer) {
+	producer = p
+}
+
+func SaveDataChange(
+	requestID string,
+	source string,
+	eventType string,
+	orgValue interface{},
+	newValue interface{},
+) error {
+	if producer == nil {
+		return errors.Errorf("audit queue producer not set")
+	}
+
+	changeRecord, err := NewChangeRecord(source, eventType, orgValue, newValue)
+	if err != nil {
+		return errors.Wrapf(err, "fail to determine changes")
+	}
+	if _, err := producer.NewEvent("AUDIT").
+		Type("audit").
+		RequestID(requestID).
+		Send(changeRecord); err != nil {
+		return errors.Wrapf(err, "failed to send data change record")
+	}
+	return nil
+}
+
+type ChangeRecord struct {
 	ID        int64                  `json:"id"`
 	ObjectID  string                 `json:"object_id"`
 	Type      string                 `json:"type"`
 	Source    string                 `json:"source"`
 	Timestamp time.Time              `json:"timestamp"`
-	Change    map[string]interface{} `json:"change"`
+	Changes   map[string]interface{} `json:"changes"`
 }
 
+//purpose:
+//	Creates a record describing a change of data
 //parameters:
 //	source could be "" then defaults to "SYSTEM" or specify the user name that made the change
 //	orgValue and newValue could be nil
-//		they are compared and changes are logged
-func NewEvent(source string, eventType string, orgValue, newValue interface{}) (Event, error) {
+//		they are compared and changes are recorded
+func NewChangeRecord(source string, eventType string, orgValue, newValue interface{}) (ChangeRecord, error) {
 	changelog, err := diff.Diff(orgValue, newValue)
 	if err != nil {
-		return Event{}, err
+		return ChangeRecord{}, err
 	}
 
 	changes := map[string]interface{}{}
@@ -129,12 +163,12 @@ func NewEvent(source string, eventType string, orgValue, newValue interface{}) (
 		objectIDString = getStringValue(newValue, "Key")
 	}
 
-	event := Event{
+	event := ChangeRecord{
 		ObjectID:  objectIDString,
 		Source:    source,
 		Type:      eventType,
 		Timestamp: time.Now(),
-		Change:    changes,
+		Changes:   changes,
 	}
 	if event.Source == "" {
 		event.Source = "SYSTEM"
diff --git a/audit/file_audit.go b/audit/file_audit.go
deleted file mode 100644
index 7d340e1bd065ac93ddbe1485781df3cda14d4d57..0000000000000000000000000000000000000000
--- a/audit/file_audit.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package audit
-
-import (
-	"encoding/json"
-	"os"
-	"time"
-
-	"gitlab.com/uafrica/go-utils/errors"
-)
-
-//creates auditor that writes to file, which could be os.Stderr or os.Stdout for debugging
-func File(f *os.File) Auditor {
-	if f == nil {
-		panic(errors.Errorf("cannot create file auditor with f=nil"))
-	}
-	return &fileAudit{
-		f: f,
-	}
-}
-
-type fileAudit struct {
-	f *os.File
-}
-
-func (fa fileAudit) WriteValues(startTime, endTime time.Time, requestID string, values map[string]interface{}) error {
-	if fa.f == nil {
-		return errors.Errorf("auditor is closed")
-	}
-	obj := map[string]interface{}{
-		"start_time": startTime,
-		"end_time":   endTime,
-		"duration":   endTime.Sub(startTime),
-		"request_id": requestID,
-		"values":     values,
-	}
-	jsonObj, err := json.Marshal(obj)
-	if err != nil {
-		return errors.Wrapf(err, "failed to JSON encode audit values")
-	}
-	if _, err := fa.f.Write(jsonObj); err != nil {
-		return errors.Wrapf(err, "failed to write audit values to file")
-	}
-	return nil
-}
-
-func (fa fileAudit) WriteEvent(requestID string, event Event) error {
-	if fa.f == nil {
-		return errors.Errorf("auditor is closed")
-	}
-	obj := map[string]interface{}{
-		"start_time": event.Timestamp,
-		"end_time":   event.Timestamp,
-		"duration":   0,
-		"request_id": requestID,
-		"values":     event,
-	}
-	jsonObj, err := json.Marshal(obj)
-	if err != nil {
-		return errors.Wrapf(err, "failed to JSON encode audit event")
-	}
-	if _, err := fa.f.Write(jsonObj); err != nil {
-		return errors.Wrapf(err, "failed to write audit event to file")
-	}
-	return nil
-}
diff --git a/audit/no_audit.go b/audit/no_audit.go
deleted file mode 100644
index 930f4134f7c4f2a820bea8ca64e6982bd90d4656..0000000000000000000000000000000000000000
--- a/audit/no_audit.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package audit
-
-import (
-	"time"
-)
-
-//creates auditor that writes nothiong
-func None() Auditor {
-	return noAudit{}
-}
-
-type noAudit struct{}
-
-func (noAudit) WriteValues(startTime, endTime time.Time, requestID string, values map[string]interface{}) error {
-	return nil
-}
-
-func (noAudit) WriteEvent(requestID string, event Event) error {
-	return nil
-}
diff --git a/config/doc.go b/config/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..2b802b4802f5072c950e3d4cf84e10521e6d684e
--- /dev/null
+++ b/config/doc.go
@@ -0,0 +1,136 @@
+package config
+
+import (
+	"fmt"
+	"os"
+	"reflect"
+	"sort"
+	"strings"
+
+	"gitlab.com/uafrica/go-utils/logger"
+	"gitlab.com/uafrica/go-utils/struct_utils"
+)
+
+func Doc(f *os.File, showValues bool, format int) {
+	if f == nil {
+		return
+	}
+
+	entries := []docEntry{}
+	for prefix, structPtr := range prefixStructs {
+		prefixEntries := docStruct(prefix, reflect.TypeOf(structPtr).Elem(), reflect.ValueOf(structPtr).Elem())
+		if showValues {
+			nv := struct_utils.NamedValuesFromEnv(prefix)
+			for i, e := range prefixEntries {
+				name := strings.ToLower(e.Env[len(prefix)+1:])
+				if values, ok := nv[name]; ok {
+					e.Current = values
+					prefixEntries[i] = e
+					delete(nv, name)
+				}
+			}
+		}
+		entries = append(entries, prefixEntries...)
+	}
+
+	sort.Slice(entries, func(i, j int) bool { return entries[i].Env < entries[j].Env })
+
+	switch format {
+	case 1: //Mark Down
+		fmt.Fprintf(f, "# Configuration from Environment\n")
+		fmt.Fprintf(f, "\n")
+		if !showValues {
+			fmt.Fprintf(f, "|Environment|Type|Default|Description & Rules|\n")
+			fmt.Fprintf(f, "|---|---|---|---|\n")
+		} else {
+			fmt.Fprintf(f, "|Environment|Type|Default|Description & Rules|Values|\n")
+			fmt.Fprintf(f, "|---|---|---|---|---|\n")
+		}
+		for _, e := range entries {
+			text := e.Text
+			if text != "" && e.Rules != "" {
+				text += "; " + e.Rules
+			}
+			fmt.Fprintf(f, "|%s|%s|%s|%s|",
+				e.Env,
+				e.Type,
+				e.Default,
+				text)
+			if showValues {
+				if len(e.Current) == 0 {
+					fmt.Fprintf(f, "(Not Defined)|") //no values
+				} else {
+					if len(e.Current) == 1 {
+						fmt.Fprintf(f, "%s|", e.Current[0]) //only one value
+					} else {
+						fmt.Fprintf(f, "%s|", strings.Join(e.Current, ", ")) //multiple values
+					}
+				}
+			}
+			fmt.Fprintf(f, "\n")
+		}
+
+	default:
+		//just dump it
+		fmt.Fprintf(f, "=====[ CONFIGURATION ]=====\n")
+		for _, e := range entries {
+			fmt.Fprintf(f, "%+v\n", e)
+		}
+	}
+}
+
+func docStruct(prefix string, t reflect.Type, v reflect.Value) (entries []docEntry) {
+	logger.Debugf("docStruct(%s, %s)", prefix, t.Name())
+	entries = []docEntry{}
+	for i := 0; i < t.NumField(); i++ {
+		tf := t.Field(i)
+		if tf.Anonymous {
+			if tf.Type.Kind() == reflect.Struct {
+				entries = append(entries, docStruct(prefix, tf.Type, v.Field(i))...) //anonymous embedded sub-struct
+			}
+			continue //anonymous embedded non-struct
+		}
+
+		tag := strings.SplitN(tf.Tag.Get("json"), ",", 2)[0]
+		if tag == "" || tag == "-" {
+			continue //excluded field
+		}
+
+		fieldName := prefix + "_" + strings.ToUpper(tag)
+		switch tf.Type.Kind() {
+		case reflect.Struct:
+			entries = append(entries, docStruct(fieldName, tf.Type, v.Field(i))...) //anonymous embedded sub-struct
+
+		case reflect.Slice:
+			entries = append(entries, docEntry{
+				Env:     fieldName,
+				Type:    "list of " + tf.Type.Elem().Name(),
+				Text:    tf.Tag.Get("doc"),
+				Default: tf.Tag.Get("default"),
+				Rules:   tf.Tag.Get("rules"),
+				Value:   v.Field(i),
+			})
+
+		default:
+			entries = append(entries, docEntry{
+				Env:     fieldName,
+				Type:    tf.Type.Name(),
+				Text:    tf.Tag.Get("doc"),
+				Default: tf.Tag.Get("default"),
+				Rules:   tf.Tag.Get("rules"),
+				Value:   v.Field(i),
+			})
+		}
+	}
+	return entries
+}
+
+type docEntry struct {
+	Env     string
+	Type    string
+	Text    string
+	Default string
+	Rules   string
+	Value   reflect.Value
+	Current []string
+}
diff --git a/config/doc_example.md b/config/doc_example.md
new file mode 100644
index 0000000000000000000000000000000000000000..85df6e928665cb4fb7adf627941f767a58879922
--- /dev/null
+++ b/config/doc_example.md
@@ -0,0 +1,11 @@
+# Configuration from Environment
+
+|Environment|Type|Default|Description & Rules|Values|
+|---|---|---|---|---|
+|API_LOGS_CLEANUP_DAYS|int64||Nr of days to keep before cleanup. Default 31.|N/A|
+|API_LOGS_INDEX_NAME|string||Name of index for api-logs (lowercase alpha-numerics with dashes, default: uafrica-v3-api-logs)|N/A|
+|API_LOGS_MAX_RESPONSE_SIZE|int64||Maximum length of response body stored. Defaults to 1024.|N/A|
+|API_LOGS_SEARCH_ADDRESSES|list of string||List of server addresses. Requires at least one, e.g. "https://localhost:9200" for local testing|[https://search-uafrica-v3-api-logs-fefgiypvmb3sg5wqohgsbqnzvq.af-south-1.es.amazonaws.com/]|
+|API_LOGS_SEARCH_PASSWORD|string||User password for HTTP basic auth. Defaults to admin for local testing.|[Aiz}a4ee]|
+|API_LOGS_SEARCH_USERNAME|string||User name for HTTP basic auth. Defaults to admin for local testing.|[uafrica]|
+|AUDIT_MAX_RESPONSE_SIZE|int64||Maximum length of response body stored. Defaults to 1024.|N/A|
\ No newline at end of file
diff --git a/config/struct.go b/config/struct.go
index f52c44b08c5a2e3d962248d6b7b031256cc0929a..04bfb884bc8d265c4f505b898490923544e6a585 100644
--- a/config/struct.go
+++ b/config/struct.go
@@ -1,114 +1,89 @@
 package config
 
 import (
-	"encoding/json"
-	"os"
-	"reflect"
 	"regexp"
-	"sort"
-	"strconv"
-	"strings"
 
 	"gitlab.com/uafrica/go-utils/errors"
+	"gitlab.com/uafrica/go-utils/logger"
+	"gitlab.com/uafrica/go-utils/string_utils"
+	"gitlab.com/uafrica/go-utils/struct_utils"
 )
 
-func Load(prefix string, configPtr interface{}) error {
+var (
+	prefixStructs = map[string]interface{}{}
+)
+
+func LoadEnv(prefix string, configStructPtr interface{}) error {
+	return Load(prefix, configStructPtr, string_utils.EnvironmentKeyReader())
+}
+
+func Load(prefix string, configStructPtr interface{}, keyReader string_utils.KeyReader) error {
 	if !prefixRegex.MatchString(prefix) {
-		return errors.Errorf("invalid config prefix \"%s\"", prefix)
-	}
-	if configPtr == nil {
-		return errors.Errorf("Load(nil)")
+		return errors.Errorf("config(%s) invalid prefix", prefix)
 	}
-	t := reflect.TypeOf(configPtr)
-	if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct {
-		return errors.Errorf("%T is not &struct", configPtr)
+
+	//store the struct before loading so it is still included in docs even if loading fails
+	prefixStructs[prefix] = configStructPtr
+
+	//read os.Getenv() or other reader...
+	nv := struct_utils.NamedValuesFromReader(prefix, keyReader)
+	logger.Debugf("nv: %+v", nv)
+
+	//parse into struct
+	unused, err := struct_utils.UnmarshalNamedValues(nv, configStructPtr)
+	if err != nil {
+		return errors.Wrapf(err, "config(%s) cannot load", prefix)
 	}
-	v := reflect.ValueOf(configPtr)
-	if err := load(prefix, t.Elem(), v.Elem()); err != nil {
-		return errors.Wrapf(err, "failed to load config with prefix %s", prefix)
+	if len(unused) > 0 {
+		//we still use os.Getenv() elsewhere, so some variables may not be in the struct
+		//e.g. AUDIT_QUEUE_URL is read from queues/sqs/producer, which matches config(prefix="AUDIT")
+		//so we cannot yet fail here, which we should, because config setting not used is often
+		//a reason for errors, when we try to configure something, then it does not work, and
+		//we cannot figure out why, but the value we did set, might just be misspelled etc.
+		//so, for now - do not fail here, just report the unused values
+		logger.Warnf("Note unused env (might be used elsewhere) for config(%s): %+v", prefix, unused)
+		//return errors.Errorf("config(%s): unknown %+v", prefix, unused)
 	}
-	if validator, ok := configPtr.(Validator); ok {
+
+	if validator, ok := configStructPtr.(Validator); ok {
 		if err := validator.Validate(); err != nil {
-			return errors.Wrapf(err, "invalid config with prefix %s", prefix)
+			return errors.Wrapf(err, "config(%s) is invalid", prefix)
 		}
 	}
 	return nil
 }
 
-type nameValue struct {
-	name  string
-	value string
-}
+func LoadRedis(prefix string, configStructPtr interface{}) error {
+	if !prefixRegex.MatchString(prefix) {
+		return errors.Errorf("config(%s) invalid prefix", prefix)
+	}
 
-func load(prefix string, t reflect.Type, ptrValue reflect.Value) error {
-	switch t.Kind() {
-	case reflect.Struct:
-		for i := 0; i < t.NumField(); i++ {
-			f := t.Field(i)
-			if err := load(prefix+"_"+strings.ToUpper(f.Name), f.Type, ptrValue.Field(i)); err != nil {
-				return errors.Wrapf(err, "cannot load field")
-			}
-		}
+	//store the struct before loading so it is still included in docs even if loading fails
+	prefixStructs[prefix] = configStructPtr
 
-	case reflect.Slice:
-		//expect JSON list of values or just one value
-		s := os.Getenv(prefix)
-		if s != "" {
-			if err := json.Unmarshal([]byte(s), ptrValue.Addr().Interface()); err != nil {
-				return errors.Wrapf(err, "cannot read env %s=%s into %s", prefix, s, t.Name())
-			}
-		} else {
-			//see if _1, _2, ... is used then construct a list with those values
-			//(only applies to list of strings)
-			values := map[string]string{}
-			for _, x := range os.Environ() {
-				parts := strings.SplitN(x, "=", 2)
-				if len(parts) == 2 && strings.HasPrefix(parts[0], prefix+"_") {
-					values[parts[0]] = parts[1]
-				}
-			}
-			if len(values) > 0 {
-				//add in sorted order
-				list := []nameValue{}
-				for n, v := range values {
-					list = append(list, nameValue{name: n, value: v})
-				}
-				sort.Slice(list, func(i, j int) bool {
-					return list[i].name < list[j].name
-				})
-				s := ""
-				for _, nv := range list {
-					if t.Elem().Kind() == reflect.String {
-						s += ",\"" + nv.value + "\"" //quoted
-					} else {
-						s += "," + nv.value //unquoted
-					}
-				}
-				s = "[" + s[1:] + "]"
-				if err := json.Unmarshal([]byte(s), ptrValue.Addr().Interface()); err != nil {
-					return errors.Wrapf(err, "cannot read env %s=%s into %s", prefix, s, t.Name())
-				}
-			}
-		}
+	//read os.Getenv()
+	nv := struct_utils.NamedValuesFromEnv(prefix)
 
-	case reflect.String:
-		s := os.Getenv(prefix)
-		if s != "" {
-			ptrValue.Set(reflect.ValueOf(s))
-		}
+	//parse into struct
+	unused, err := struct_utils.UnmarshalNamedValues(nv, configStructPtr)
+	if err != nil {
+		return errors.Wrapf(err, "config(%s) cannot load", prefix)
+	}
+	if len(unused) > 0 {
+		//we still use os.Getenv() elsewhere, so some variables may not be in the struct
+		//e.g. AUDIT_QUEUE_URL is read from queues/sqs/producer, which matches config(prefix="AUDIT")
+		//so we cannot yet fail here, which we should, because config setting not used is often
+		//a reason for errors, when we try to configure something, then it does not work, and
+		//we cannot figure out why, but the value we did set, might just be misspelled etc.
+		//so, for now - do not fail here, just report the unused values
+		logger.Warnf("Note unused env (might be used elsewhere) for config(%s): %+v", prefix, unused)
+		//return errors.Errorf("config(%s): unknown %+v", prefix, unused)
+	}
 
-	case reflect.Int64:
-		s := os.Getenv(prefix)
-		if s != "" {
-			i64, err := strconv.ParseInt(s, 10, 64)
-			if err != nil {
-				return errors.Errorf("%s=%s not integer value", prefix, s)
-			}
-			ptrValue.Set(reflect.ValueOf(i64))
+	if validator, ok := configStructPtr.(Validator); ok {
+		if err := validator.Validate(); err != nil {
+			return errors.Wrapf(err, "config(%s) is invalid", prefix)
 		}
-
-	default:
-		return errors.Errorf("cannot load config %s_... into %s kind %s", prefix, t.Name(), t.Kind())
 	}
 	return nil
 }
diff --git a/config/struct_test.go b/config/struct_test.go
index 2f153ef15b98181d2de259088dcdb9e1fcb5ac54..297664f93ac5c3899e7aabfb6239eb13adde201c 100644
--- a/config/struct_test.go
+++ b/config/struct_test.go
@@ -1,10 +1,15 @@
 package config_test
 
 import (
+	"encoding/json"
+	"fmt"
 	"os"
+	"strings"
 	"testing"
+	"time"
 
 	"gitlab.com/uafrica/go-utils/config"
+	"gitlab.com/uafrica/go-utils/errors"
 	"gitlab.com/uafrica/go-utils/logger"
 )
 
@@ -12,59 +17,136 @@ func TestLoad(t *testing.T) {
 	logger.SetGlobalFormat(logger.NewConsole())
 	logger.SetGlobalLevel(logger.LevelDebug)
 
-	os.Setenv("TEST_A", "123")
-	os.Setenv("TEST_B", "abc")
-	os.Setenv("TEST_C", "789")
+	//booleans
+	os.Setenv("TEST_VALUE_ENABLE_CACHE", "true")
+	os.Setenv("TEST_VALUE_DISABLE_LOG", "true")
+	os.Setenv("TEST_VALUE_ADMIN", "false")
 
-	//list of value must be valid JSON, i.e. if list of int or list of string, it must be unquoted or quoted as expected by JSON:
-	os.Setenv("TEST_L", "[1,2,3]")
-	os.Setenv("TEST_M", "[\"7\", \"8\"]")
+	//integers
+	os.Setenv("TEST_VALUE_MAX_SIZE", "12")
 
-	//list of string entries can also be defined with _1, _2, ... postfixes
-	//the key value must be unique but has no significance apart from ordering,
-	//so if you comment out _3, then _1, _2 and _4 will result in 3 entries in the list
-	//as long as they are unique keys
-	os.Setenv("TEST_N_1", "111")
-	os.Setenv("TEST_N_2", "222")
-	//os.Setenv("TEST_N_3", "333")
-	os.Setenv("TEST_N_4", "444")
+	os.Setenv("TEST_VALUE_SEQ1", "[4,5,6]") //list in one value
 
-	os.Setenv("TEST_P_1", "111")
-	os.Setenv("TEST_P_2", "222")
-	//os.Setenv("TEST_N_3", "333")
-	os.Setenv("TEST_P_4", "444")
+	os.Setenv("TEST_VALUE_SEQ2_10", "10") //numbered list elements
+	os.Setenv("TEST_VALUE_SEQ2_20", "20")
+	os.Setenv("TEST_VALUE_SEQ2_4", "4")
+	os.Setenv("TEST_VALUE_SEQ2_15", "15")
+	os.Setenv("TEST_VALUE_SEQ2", "100")
+
+	os.Setenv("TEST_VALUE_CUTOFF", "2021-11-20T12:00:00+02:00")
+	os.Setenv("TEST_VALUE_HOLIDAYS", "[2021-03-21,2021-04-27,2021-05-01,2021-06-16,2021-08-09,2021-12-16,2021-12-25]")
 
 	c := Config{}
-	if err := config.Load("TEST", &c); err != nil {
+	if err := config.LoadEnv("TEST_VALUE", &c); err != nil {
 		t.Fatalf("Cannot load config: %+v", err)
 	}
+	t.Logf("Loaded config: %+v", c)
 
-	if c.A != "123" || c.B != "abc" || c.C != 789 {
-		t.Fatalf("Loaded wrong values: %+v", c)
+	if !c.EnableCache || !c.DisableLog || c.Admin {
+		t.Fatalf("wrong bool values: %+v", c)
+	}
+	if c.MaxSize != 12 {
+		t.Fatalf("wrong nr values: %+v", c)
 	}
-	if len(c.L) != 3 || c.L[0] != 1 || c.L[1] != 2 || c.L[2] != 3 {
-		t.Fatalf("Loaded wrong values: %+v", c)
+	if len(c.Seq1) != 3 || c.Seq1[0] != 4 || c.Seq1[1] != 5 || c.Seq1[2] != 6 {
+		t.Fatalf("wrong seq1: %+v", c)
 	}
-	if len(c.M) != 2 || c.M[0] != "7" || c.M[1] != "8" {
-		t.Fatalf("Loaded wrong values for M: %+v", c.M)
+	if len(c.Seq2) != 5 || c.Seq2[0] != 100 || c.Seq2[1] != 4 || c.Seq2[2] != 10 || c.Seq2[3] != 15 || c.Seq2[4] != 20 {
+		t.Fatalf("wrong seq2: %+v", c)
 	}
-	t.Logf("M=%+v", c.M)
-	if len(c.N) != 3 || c.N[0] != "111" || c.N[1] != "222" || c.N[2] != "444" {
-		t.Fatalf("Loaded wrong values for N: %+v", c.N)
+	if c.Cutoff.UTC().Format("2006-01-02 15:04:05") != "2021-11-20 10:00:00" {
+		t.Fatalf("wrong cutoff")
 	}
-	t.Logf("N=%+v", c.N)
-	if len(c.P) != 3 || c.P[0] != 111 || c.P[1] != 222 || c.P[2] != 444 {
-		t.Fatalf("Loaded wrong values for P: %+v", c.N)
+	if len(c.Holidays) != 7 ||
+		c.Holidays[0].String() != "2021-03-21" ||
+		c.Holidays[1].String() != "2021-04-27" ||
+		c.Holidays[2].String() != "2021-05-01" ||
+		c.Holidays[3].String() != "2021-06-16" ||
+		c.Holidays[4].String() != "2021-08-09" ||
+		c.Holidays[5].String() != "2021-12-16" ||
+		c.Holidays[6].String() != "2021-12-25" {
+		t.Fatalf("wrong holidays")
+	}
+
+	{
+		t.Logf("config(TEST) = %+v", c)
+		e := json.NewEncoder(os.Stdout)
+		e.SetIndent("", "  ")
+		e.Encode(c)
 	}
-	t.Logf("P=%+v", c.P)
+
 }
 
 type Config struct {
-	A string   `json:"a"`
-	B string   `json:"b"`
-	C int64    `json:"c"`
-	L []int64  `json:"l"`
-	M []string `json:"m"`
-	N []string `json:"n"`
-	P []int64  `json:"p"`
+	EnableCache bool      `json:"enable_cache"`
+	DisableLog  bool      `json:"disable_log"`
+	Admin       bool      `json:"admin"`
+	MaxSize     int64     `json:"max_size"`
+	Seq1        []int     `json:"seq1"`
+	Seq2        []int64   `json:"seq2"`
+	Cutoff      time.Time `json:"cutoff"`
+	Holidays    []Date    `json:"holidays"`
+}
+
+type Date struct {
+	Y, M, D int
+}
+
+func (d *Date) Scan(value []byte) error {
+	s := strings.Trim(string(value), "\"")
+	v, err := time.ParseInLocation("2006-01-02", s, time.Now().Location())
+	if err != nil {
+		return errors.Errorf("%s is not CCYY-MM-DD", s)
+	}
+	d.Y = v.Year()
+	d.M = int(v.Month())
+	d.D = v.Day()
+	return nil
+}
+
+func (d *Date) UnmarshalJSON(value []byte) error {
+	return d.Scan(value)
+}
+
+func (d Date) String() string {
+	return fmt.Sprintf("%04d-%02d-%02d", d.Y, d.M, d.D)
+}
+
+func (d Date) MarshalJSON() ([]byte, error) {
+	return []byte("\"" + d.String() + "\""), nil
+}
+
+type SearchConfig struct {
+	Addresses []string `json:"addresses"`
+}
+
+type LogConfig struct {
+	SearchConfig `json:"search"`
+	Search2      SearchConfig `json:"search2"`
+	IndexName    string       `json:"index_name"`
+}
+
+func TestLogConfig(t *testing.T) {
+	logger.SetGlobalFormat(logger.NewConsole())
+	logger.SetGlobalLevel(logger.LevelDebug)
+	os.Setenv("LOG_INDEX_NAME", "abc")
+	os.Setenv("LOG_SEARCH_ADDRESSES", "[A,B,C]")
+	os.Setenv("LOG_SEARCH2_ADDRESSES", "[D,E,F]")
+	os.Setenv("LOG_OTHER", "1")
+	os.Setenv("LOG_SEARCH_OTHER", "2")
+	c := LogConfig{}
+	err := config.LoadEnv("LOG", &c)
+	if err != nil {
+		t.Fatalf("Failed: %+v", err)
+	}
+	t.Logf("Loaded: %+v", c)
+	if c.IndexName != "abc" {
+		t.Fatalf("wrong index_name:%s", c.IndexName)
+	}
+	if len(c.Addresses) != 3 || c.Addresses[0] != "A" || c.Addresses[1] != "B" || c.Addresses[2] != "C" {
+		t.Fatalf("wrong addresses:%+v", c.Addresses)
+	}
+	if len(c.Search2.Addresses) != 3 || c.Search2.Addresses[0] != "D" || c.Search2.Addresses[1] != "E" || c.Search2.Addresses[2] != "F" {
+		t.Fatalf("wrong search2 addresses:%+v", c.Search2.Addresses)
+	}
 }
diff --git a/consumer/README.md b/consumer/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2d8745e39563b1157a71b7b26f79dc5929bf4e23
--- /dev/null
+++ b/consumer/README.md
@@ -0,0 +1,4 @@
+# Consumer
+
+Consumes a queue of events in the same way that API processes HTTP requests.
+Consumer is a type of service, just like API and CRON are also types of services.
diff --git a/consumer/check.go b/consumer/check.go
new file mode 100644
index 0000000000000000000000000000000000000000..3b77c4f07994ceb0a78d7ab36d1d80c46e1bba52
--- /dev/null
+++ b/consumer/check.go
@@ -0,0 +1,7 @@
+package consumer
+
+import "gitlab.com/uafrica/go-utils/service"
+
+type Checker interface {
+	Check(service.Context) (interface{}, error)
+}
diff --git a/queues/consumer.go b/consumer/consumer.go
similarity index 70%
rename from queues/consumer.go
rename to consumer/consumer.go
index ec4c50db8dc3f9b01f514ed232dac34285337e86..6278ae91c0d81c9bffcff33d5b5656191bd32640 100644
--- a/queues/consumer.go
+++ b/consumer/consumer.go
@@ -1,10 +1,10 @@
-package queues
+package consumer
 
 import "gitlab.com/uafrica/go-utils/service"
 
 //IConsumer is the interface implemented by both mem and sqs consumer
 type Consumer interface {
-	WithStarter(name string, starter service.IStarter) Consumer
+	WithStarter(name string, starter service.Starter) Consumer
 	Run()
 	ProcessFile(filename string) error
 }
diff --git a/queues/context.go b/consumer/context.go
similarity index 84%
rename from queues/context.go
rename to consumer/context.go
index c3321fecabf9238b4dd28da4616ede30bbb3dc99..ef182c8b0bfb033ed072928ec2d202cf4efecbaf 100644
--- a/queues/context.go
+++ b/consumer/context.go
@@ -1,4 +1,4 @@
-package queues
+package consumer
 
 import (
 	"context"
@@ -6,13 +6,14 @@ import (
 	"reflect"
 
 	"gitlab.com/uafrica/go-utils/errors"
+	"gitlab.com/uafrica/go-utils/queues"
 	"gitlab.com/uafrica/go-utils/service"
 )
 
 //Context within a consumer to process an event
 type Context interface {
 	service.Context
-	Event() service.Event                                   //the event start started this context in the consumer
+	Event() queues.Event                                    //the event that started this context in the consumer
 	GetRecord(recordType reflect.Type) (interface{}, error) //extract struct value from event data
 }
 
@@ -20,10 +21,10 @@ var contextInterfaceType = reflect.TypeOf((*Context)(nil)).Elem()
 
 type queuesContext struct {
 	service.Context
-	event service.Event
+	event queues.Event
 }
 
-func NewContext(service service.Service, event service.Event) (Context, error) {
+func NewContext(service service.Service, event queues.Event) (Context, error) {
 	baseCtx := context.Background()
 	serviceContext, err := service.NewContext(baseCtx, event.RequestIDValue, map[string]interface{}{
 		"message_type": event.TypeName,
@@ -39,7 +40,7 @@ func NewContext(service service.Service, event service.Event) (Context, error) {
 	return ctx, nil
 }
 
-func (ctx queuesContext) Event() service.Event {
+func (ctx queuesContext) Event() queues.Event {
 	return ctx.event
 }
 
diff --git a/queues/handler.go b/consumer/handler.go
similarity index 98%
rename from queues/handler.go
rename to consumer/handler.go
index 36ba9ec40d196177c7ce54f6e638471ba3935f0a..bc09bd749c64125e21c985d98ff088e1c89be7eb 100644
--- a/queues/handler.go
+++ b/consumer/handler.go
@@ -1,4 +1,4 @@
-package queues
+package consumer
 
 import (
 	"reflect"
diff --git a/queues/mem/README.md b/consumer/mem_consumer/README.md
similarity index 100%
rename from queues/mem/README.md
rename to consumer/mem_consumer/README.md
diff --git a/queues/mem/consumer.go b/consumer/mem_consumer/consumer.go
similarity index 65%
rename from queues/mem/consumer.go
rename to consumer/mem_consumer/consumer.go
index f2c33116343fc1f61fca466b4d4e5d7c8234ef1b..0732399897e6888c35a0ae9c2dcff63283ccc09b 100644
--- a/queues/mem/consumer.go
+++ b/consumer/mem_consumer/consumer.go
@@ -1,4 +1,4 @@
-package mem
+package mem_consumer
 
 import (
 	"encoding/json"
@@ -10,55 +10,50 @@ import (
 	"time"
 
 	"github.com/google/uuid"
-	"gitlab.com/uafrica/go-utils/audit"
+	"gitlab.com/uafrica/go-utils/consumer"
 	"gitlab.com/uafrica/go-utils/errors"
+	"gitlab.com/uafrica/go-utils/logs"
 	"gitlab.com/uafrica/go-utils/queues"
 	"gitlab.com/uafrica/go-utils/service"
 )
 
-func NewConsumer(s service.Service, routes map[string]interface{}) queues.Consumer {
+func New(s service.Service, routes map[string]interface{}) consumer.Consumer {
 	if s == nil {
 		panic("NewConsumer(service==nil)")
 	}
-	router, err := queues.NewRouter(routes)
+	router, err := consumer.NewRouter(routes)
 	if err != nil {
 		panic(fmt.Sprintf("cannot create router: %+v", err))
 	}
-	c := &consumer{
+	c := &memConsumer{
 		Service: s,
 		router:  router,
 		queues:  map[string]*queue{},
 	}
 
 	//create a producer that will produce into this consumer
-	c.producer = &producer{
+	c.producer = &memProducer{
 		consumer: c,
 	}
 	c.Service = c.Service.WithProducer(c.producer)
 	return c
 }
 
-type consumer struct {
+type memConsumer struct {
 	sync.Mutex
 	service.Service
-	router   queues.Router
-	producer *producer
+	router   consumer.Router
+	producer queues.Producer
 	queues   map[string]*queue
 }
 
 //wrap Service.WithStarter to return cron, else cannot be chained
-func (consumer *consumer) WithStarter(name string, starter service.IStarter) queues.Consumer {
+func (consumer *memConsumer) WithStarter(name string, starter service.Starter) consumer.Consumer {
 	consumer.Service = consumer.Service.WithStarter(name, starter)
 	return consumer
 }
 
-//wrap else cannot be chained
-func (consumer *consumer) WithAuditor(auditor audit.Auditor) queues.Consumer {
-	consumer.Service = consumer.Service.WithAuditor(auditor)
-	return consumer
-}
-
-func (consumer *consumer) Queue(name string) (*queue, error) {
+func (consumer *memConsumer) Queue(name string) (*queue, error) {
 	consumer.Lock()
 	defer consumer.Unlock()
 	q, ok := consumer.queues[name]
@@ -66,7 +61,7 @@ func (consumer *consumer) Queue(name string) (*queue, error) {
 		q = &queue{
 			consumer: consumer,
 			name:     name,
-			ch:       make(chan service.Event),
+			ch:       make(chan queues.Event),
 		}
 		go q.run()
 		consumer.queues[name] = q
@@ -76,20 +71,20 @@ func (consumer *consumer) Queue(name string) (*queue, error) {
 
 //do not call this - when using local producer, the consumer is automatically running
 //for each queue you send to, and processing from q.run()
-func (consumer *consumer) Run() {
+func (consumer *memConsumer) Run() {
 	panic(errors.Errorf("DO NOT RUN LOCAL CONSUMER"))
 }
 
-func (consumer *consumer) ProcessFile(filename string) error {
+func (consumer *memConsumer) ProcessFile(filename string) error {
 	f, err := os.Open(filename)
 	if err != nil {
 		return errors.Wrapf(err, "failed to open queue event file %s", filename)
 	}
 	defer f.Close()
 
-	var event service.Event
+	var event queues.Event
 	if err := json.NewDecoder(f).Decode(&event); err != nil {
-		return errors.Wrapf(err, "failed to read service.Event from file %s", filename)
+		return errors.Wrapf(err, "failed to read queues.Event from file %s", filename)
 	}
 
 	q := queue{
@@ -107,16 +102,16 @@ func (consumer *consumer) ProcessFile(filename string) error {
 }
 
 type queue struct {
-	consumer *consumer
+	consumer *memConsumer
 	name     string
-	ch       chan service.Event
+	ch       chan queues.Event
 }
 
 func (q *queue) run() {
 	// logger.Debugf("Q(%s) Start", q.name)
 	for event := range q.ch {
 		//process in background because some event processing sends to itself then wait for some responses on new events on the same queue!!!
-		go func() {
+		go func(event queues.Event) {
 			// logger.Debugf("Q(%s) process start: %+v", q.name, event)
 			err := q.process(event)
 			if err != nil {
@@ -124,12 +119,12 @@ func (q *queue) run() {
 				// } else {
 				// 	q.consumer.Debugf("Q(%s) process success: %+v", q.name, err)
 			}
-		}()
+		}(event)
 	}
 	// logger.Debugf("Q(%s) STOPPED", q.name)
 }
 
-func (q *queue) process(event service.Event) error {
+func (q *queue) process(event queues.Event) error {
 	//todo: create context with logger
 	rand.Seed(time.Now().Unix())
 
@@ -137,7 +132,7 @@ func (q *queue) process(event service.Event) error {
 	// if q.crashReporter != nil {
 	// 	defer q.crashReporter.Catch(ctx)
 	// }
-	ctx, err := queues.NewContext(q.consumer.Service, event)
+	ctx, err := consumer.NewContext(q.consumer.Service, event)
 	if err != nil {
 		return err
 	}
@@ -147,7 +142,7 @@ func (q *queue) process(event service.Event) error {
 	if err != nil {
 		return errors.Wrapf(err, "unhandled event type(%v)", event.TypeName)
 	}
-	handler, ok := sqsHandler.(queues.Handler)
+	handler, ok := sqsHandler.(consumer.Handler)
 	if !ok {
 		return errors.Errorf("messageType(%v) unsupported signature: %T", event.TypeName, sqsHandler)
 	}
@@ -163,18 +158,18 @@ func (q *queue) process(event service.Event) error {
 		return errors.Wrapf(err, "invalid message body")
 	}
 
-	ctx.WithFields(map[string]interface{}{
-		"params": event.ParamValues,
-		"body":   event.BodyJSON,
-	}).Infof("RECV(%s) Queue(%s).Type(%s).Due(%s): (%T)%v",
-		"---", //not yet available here - not part of event, and in SQS I think it is passed in SQS layer, so need to extend local channel to include this along with event
-		q.name,
-		event.TypeName,
-		event.DueTime,
-		recordStruct,
-		recordStruct)
-
-	ctx.Debugf("message (%T) %+v", recordStruct, recordStruct)
+	//log if not internal queue
+	if q.name != "AUDIT" && q.name != "API_LOGS" {
+		ctx.WithFields(map[string]interface{}{
+			"params": event.ParamValues,
+			"body":   event.BodyJSON,
+		}).Infof("RECV(%s) Queue(%s).Type(%s).Due(%s)",
+			"---", //not yet available here - not part of event, and in SQS I think it is passed in SQS layer, so need to extend local channel to include this along with event
+			q.name,
+			event.TypeName,
+			event.DueTime)
+		ctx.Tracef("RECV(%s) Request(%T)%v", q.name, recordStruct, recordStruct)
+	}
 	args = append(args, reflect.ValueOf(recordStruct))
 
 	results := handler.FuncValue.Call(args)
@@ -185,7 +180,12 @@ func (q *queue) process(event service.Event) error {
 	return nil
 } //queue.process()
 
-func (q *queue) Send(event service.Event) (msgID string, err error) {
+func (q *queue) Send(event queues.Event) (msgID string, err error) {
+	startTime := time.Now()
+	defer func() {
+		logs.LogSQSSent(startTime, event.QueueName, event.TypeName, event.BodyJSON)
+	}()
+
 	event.MessageID = uuid.New().String()
 	q.ch <- event
 	return event.MessageID, nil
diff --git a/queues/mem/producer.go b/consumer/mem_consumer/producer.go
similarity index 54%
rename from queues/mem/producer.go
rename to consumer/mem_consumer/producer.go
index b32c4c44057d4e135c1e1c741ac94d8da2266237..f1b51f070368c46af97a88daa0d748d47ebe06e8 100644
--- a/queues/mem/producer.go
+++ b/consumer/mem_consumer/producer.go
@@ -1,29 +1,29 @@
-package mem
+package mem_consumer
 
 import (
+	"gitlab.com/uafrica/go-utils/consumer"
 	"gitlab.com/uafrica/go-utils/errors"
 	"gitlab.com/uafrica/go-utils/logger"
 	"gitlab.com/uafrica/go-utils/queues"
-	"gitlab.com/uafrica/go-utils/service"
 )
 
 //can only produce locally if also consuming local
-func NewProducer(memConsumer queues.Consumer) service.Producer {
-	if memConsumer == nil {
-		panic(errors.Errorf("cannot product locally without consumer"))
+func NewProducer(consumer consumer.Consumer) queues.Producer {
+	if consumer == nil {
+		panic(errors.Errorf("cannot produce local events without mem consumer"))
 	}
-	mc, ok := memConsumer.(*consumer)
+	mc, ok := consumer.(*memConsumer)
 	if !ok {
-		panic(errors.Errorf("NewProducer(%T) is not a mem consumer", memConsumer))
+		panic(errors.Errorf("NewProducer(consumer=%T) is not a mem consumer", consumer))
 	}
 	return mc.producer
 }
 
-type producer struct {
-	consumer *consumer
+type memProducer struct {
+	consumer *memConsumer
 }
 
-func (producer *producer) Send(event service.Event) (string, error) {
+func (producer *memProducer) Send(event queues.Event) (string, error) {
 	logger.Debugf("MEM producer.queue(%s) Sending event %+v", event.QueueName, event)
 	q, err := producer.consumer.Queue(event.QueueName)
 	if err != nil {
@@ -38,3 +38,7 @@ func (producer *producer) Send(event service.Event) (string, error) {
 	logger.Debugf("MEM producer.queue(%s) SENT event %+v", event.QueueName, event)
 	return msgID, nil
 }
+
+func (producer *memProducer) NewEvent(queueName string) queues.Event {
+	return queues.NewEvent(producer, queueName)
+}
diff --git a/queues/router.go b/consumer/router.go
similarity index 99%
rename from queues/router.go
rename to consumer/router.go
index f2357365b0e2b7a16d10bfd8ab741c91f3b22ba7..744cf1a5d5ddb8b3f861861b8799b758dfb06e29 100644
--- a/queues/router.go
+++ b/consumer/router.go
@@ -1,4 +1,4 @@
-package queues
+package consumer
 
 import (
 	"fmt"
diff --git a/queues/sqs/consumer.go b/consumer/sqs_consumer/consumer.go
similarity index 63%
rename from queues/sqs/consumer.go
rename to consumer/sqs_consumer/consumer.go
index 1c31e5f6412fb8f29b15f016a645e9c12874326c..7974fa05718cc5f01836a239947673fd3e0f09e2 100644
--- a/queues/sqs/consumer.go
+++ b/consumer/sqs_consumer/consumer.go
@@ -1,4 +1,4 @@
-package sqs
+package sqs_consumer
 
 import (
 	"context"
@@ -16,17 +16,20 @@ import (
 	"github.com/aws/aws-lambda-go/lambdacontext"
 	"github.com/google/uuid"
 	"gitlab.com/uafrica/go-utils/audit"
+	"gitlab.com/uafrica/go-utils/consumer"
 	"gitlab.com/uafrica/go-utils/errors"
+	"gitlab.com/uafrica/go-utils/logs"
 	"gitlab.com/uafrica/go-utils/queues"
+	"gitlab.com/uafrica/go-utils/queues/sqs_producer"
 	"gitlab.com/uafrica/go-utils/service"
 )
 
-func NewConsumer(requestIDHeaderKey string, routes map[string]interface{}) queues.Consumer {
+func New(requestIDHeaderKey string, routes map[string]interface{}) consumer.Consumer {
 	env := os.Getenv("ENVIRONMENT") //todo: support config loading for local dev and env for lambda in prod
 	if env == "" {
 		env = "dev"
 	}
-	router, err := queues.NewRouter(routes)
+	router, err := consumer.NewRouter(routes)
 	if err != nil {
 		panic(fmt.Sprintf("cannot create router: %+v", err))
 	}
@@ -40,44 +43,42 @@ func NewConsumer(requestIDHeaderKey string, routes map[string]interface{}) queue
 		}
 	}
 
+	producer := sqs_producer.New(requestIDHeaderKey)
 	s := service.New().
-		WithProducer(NewProducer(requestIDHeaderKey))
-	return consumer{
+		WithProducer(producer)
+	audit.Init(producer)
+	logs.Init(producer)
+
+	return sqsConsumer{
 		Service:             s,
 		env:                 env,
 		router:              router,
 		requestIDHeaderKey:  requestIDHeaderKey,
 		ConstantMessageType: sqsMessageType,
-		checks:              map[string]queues.ICheck{},
+		checks:              map[string]consumer.Checker{},
 	}
 }
 
-type consumer struct {
+type sqsConsumer struct {
 	service.Service
 	env                 string
-	router              queues.Router
+	router              consumer.Router
 	requestIDHeaderKey  string
 	ConstantMessageType string //from os.Getenv("SQS_MESSAGE_TYPE")
-	checks              map[string]queues.ICheck
+	checks              map[string]consumer.Checker
 }
 
 //wrap Service.WithStarter to return cron, else cannot be chained
-func (consumer consumer) WithStarter(name string, starter service.IStarter) queues.Consumer {
-	consumer.Service = consumer.Service.WithStarter(name, starter)
-	return consumer
-}
-
-//wrap else cannot be chained
-func (consumer consumer) WithAuditor(auditor audit.Auditor) queues.Consumer {
-	consumer.Service = consumer.Service.WithAuditor(auditor)
-	return consumer
+func (c sqsConsumer) WithStarter(name string, starter service.Starter) consumer.Consumer {
+	c.Service = c.Service.WithStarter(name, starter)
+	return c
 }
 
-func (consumer consumer) Run() {
-	lambda.Start(consumer.Handler)
+func (c sqsConsumer) Run() {
+	lambda.Start(c.Handler)
 }
 
-func (consumer consumer) ProcessFile(filename string) error {
+func (c sqsConsumer) ProcessFile(filename string) error {
 	f, err := os.Open(filename)
 	if err != nil {
 		return errors.Wrapf(err, "failed to open queue event file %s", filename)
@@ -89,7 +90,7 @@ func (consumer consumer) ProcessFile(filename string) error {
 		return errors.Wrapf(err, "failed to read sqs event from file %s", filename)
 	}
 
-	if consumer.Handler(
+	if c.Handler(
 		lambdacontext.NewContext(
 			context.Background(),
 			&lambdacontext.LambdaContext{
@@ -106,7 +107,7 @@ func (consumer consumer) ProcessFile(filename string) error {
 	return nil
 }
 
-func (consumer consumer) Handler(baseCtx context.Context, lambdaEvent events.SQSEvent) error {
+func (c sqsConsumer) Handler(baseCtx context.Context, lambdaEvent events.SQSEvent) error {
 	//todo: create context with logger
 	rand.Seed(time.Now().Unix())
 
@@ -115,17 +116,18 @@ func (consumer consumer) Handler(baseCtx context.Context, lambdaEvent events.SQS
 	// 	defer sqs.crashReporter.Catch(ctx)
 	// }
 
-	if consumer.ConstantMessageType != "" {
+	if c.ConstantMessageType != "" {
 		//legacy mode for fixed message type as used in shiplogic
 		//where the whole instance is started for a specific SQS_MESSAGE_TYPE defined in environment
-		handler, err := consumer.router.Route(consumer.ConstantMessageType)
+		handler, err := c.router.Route(c.ConstantMessageType)
 		if err != nil {
-			return errors.Wrapf(err, "messageType=%s not handled", consumer.ConstantMessageType) //checked on startup - should never get here!!!
+			return errors.Wrapf(err, "messageType=%s not handled", c.ConstantMessageType) //checked on startup - should never get here!!!
 		}
 
 		if msgHandler, ok := handler.(func(events.SQSEvent) error); !ok {
-			return errors.Wrapf(err, "SQS_MESSAGE_TYPE=%s: handler signature %T not supported", consumer.ConstantMessageType, handler)
+			return errors.Wrapf(err, "SQS_MESSAGE_TYPE=%s: handler signature %T not supported", c.ConstantMessageType, handler)
 		} else {
+			//call the handler
 			return msgHandler(lambdaEvent)
 		}
 	} else {
@@ -133,20 +135,36 @@ func (consumer consumer) Handler(baseCtx context.Context, lambdaEvent events.SQS
 		//process all message records in this event:
 		for messageIndex, message := range lambdaEvent.Records {
 			//get request-id for this message record
+			startTime := time.Now()
 			requestID := ""
-			if requestIDAttr, ok := message.MessageAttributes[consumer.requestIDHeaderKey]; ok {
+			if requestIDAttr, ok := message.MessageAttributes[c.requestIDHeaderKey]; ok {
 				requestID = *requestIDAttr.StringValue
 			}
 
 			messageType := ""
+			var requestToLog interface{}
+			var handlerErr error
+			requestToLog = message.Body //logged as a raw string if we fail before parsing the body into a struct
+			defer func() {
+				if err := logs.LogSQSRequest(
+					startTime,
+					requestID,
+					messageType,
+					requestToLog,
+					handlerErr,
+				); err != nil {
+					c.Errorf("failed to log: %+v", err)
+				}
+			}()
+
 			if messageTypeAttr, ok := message.MessageAttributes["type"]; !ok || messageTypeAttr.StringValue == nil {
-				consumer.Errorf("ignoring message without messageType") //todo: could support generic handler for these... not yet required
+				c.Errorf("ignoring message without messageType") //todo: could support generic handler for these... not yet required
 				continue
 			} else {
 				messageType = *messageTypeAttr.StringValue
 			}
 
-			event := service.Event{
+			event := queues.Event{
 				//producer:  nil,
 				MessageID:      message.MessageId,
+				QueueName:      "N/A", //queue name is not present in the lambda SQS event; TODO: derive it (env var?) - NOTE(review): the AUDIT/API_LOGS QueueName checks below can never match while this is "N/A"
@@ -156,23 +174,26 @@ func (consumer consumer) Handler(baseCtx context.Context, lambdaEvent events.SQS
 				BodyJSON:       message.Body,
 			}
 
-			ctx, err := queues.NewContext(consumer.Service, event)
+			ctx, err := consumer.NewContext(c.Service, event)
 			if err != nil {
 				return err
 			}
 
-			ctx.WithFields(map[string]interface{}{
-				"message_index": messageIndex,
-				"message":       message,
-			}).Infof("Queue(%s) Start SQS Handler Event: %v", ctx.Event().QueueName, ctx.Event())
+			//log if not internal queue
+			if ctx.Event().QueueName != "AUDIT" && ctx.Event().QueueName != "API_LOGS" {
+				ctx.WithFields(map[string]interface{}{
+					"message_index": messageIndex,
+					"message":       message,
+				}).Infof("Queue(%s) Start SQS Handler Event: %v", ctx.Event().QueueName, ctx.Event())
+			}
 
 			//routing on messageType
-			sqsHandler, err := consumer.router.Route(messageType)
+			sqsHandler, err := c.router.Route(messageType)
 			if err != nil {
 				ctx.Errorf("Unhandled sqs messageType(%v): %v", messageType, err)
 				continue
 			}
-			handler, ok := sqsHandler.(queues.Handler)
+			handler, ok := sqsHandler.(consumer.Handler)
 			if !ok {
 				ctx.Errorf("messageType(%v) unsupported signature: %T", messageType, sqsHandler)
 				continue
@@ -189,13 +210,15 @@ func (consumer consumer) Handler(baseCtx context.Context, lambdaEvent events.SQS
 				ctx.Errorf("invalid message: %+v", err)
 				continue
 			}
+			requestToLog = recordStruct //replace string log with structured log
 
-			ctx.Debugf("message (%T) %+v", recordStruct, recordStruct)
+			ctx.Tracef("message (%T) %+v", recordStruct, recordStruct)
 			args = append(args, reflect.ValueOf(recordStruct))
 
 			results := handler.FuncValue.Call(args)
 			if len(results) > 0 && !results[0].IsNil() {
-				ctx.Errorf("handler failed: %+v", results[0].Interface().(error))
+				handlerErr = results[0].Interface().(error)
+				ctx.Errorf("handler failed: %+v", handlerErr)
 			}
 		}
 	}
diff --git a/cron/cron.go b/cron/cron.go
index 5a9dff266b6ef2411f276cefa5599b1199b839ed..114e106cfde429a8bed1e8cc5d741bb4d94fcd2e 100644
--- a/cron/cron.go
+++ b/cron/cron.go
@@ -8,7 +8,6 @@ import (
 	"github.com/aws/aws-lambda-go/lambda"
 	"github.com/aws/aws-lambda-go/lambdacontext"
 	"github.com/google/uuid"
-	"gitlab.com/uafrica/go-utils/audit"
 	"gitlab.com/uafrica/go-utils/errors"
 	"gitlab.com/uafrica/go-utils/logger"
 	"gitlab.com/uafrica/go-utils/service"
@@ -44,17 +43,11 @@ type Cron struct {
 }
 
 //wrap Service.WithStarter to return cron, else cannot be chained
-func (cron Cron) WithStarter(name string, starter service.IStarter) Cron {
+func (cron Cron) WithStarter(name string, starter service.Starter) Cron {
 	cron.Service = cron.Service.WithStarter(name, starter)
 	return cron
 }
 
-//wrap else cannot be chained
-func (cron Cron) WithAuditor(auditor audit.Auditor) Cron {
-	cron.Service = cron.Service.WithAuditor(auditor)
-	return cron
-}
-
 //add a check to startup of each context
 //they will be called in the sequence they were added
 //if check return error, processing stops and err is returned
diff --git a/cron/handler.go b/cron/handler.go
index 956e14cb3d20992cfcacac4b34dc1f58a1e7b108..814a41ead1696e110d73ee03b7aad7f158175da5 100644
--- a/cron/handler.go
+++ b/cron/handler.go
@@ -22,7 +22,7 @@ func NewHandler(fnc interface{}) (Handler, error) {
 		return h, errors.Errorf("returns %d results instead of (error)", fncType.NumOut())
 	}
 
-	//arg[0] must implement interface queues.Context
+	//arg[0] must implement interface consumer.Context
 	if fncType.In(0) != contextInterfaceType &&
 		!fncType.In(0).Implements(contextInterfaceType) {
 		return h, errors.Errorf("first arg %v does not implement %v", fncType.In(0), contextInterfaceType)
diff --git a/examples/core/api/main.go b/examples/core/api/main.go
index b6907329733375ebc52cf6bbf257accd783ea2d7..5fe8aa1c8002719e75a26797e18943067342fc11 100644
--- a/examples/core/api/main.go
+++ b/examples/core/api/main.go
@@ -6,7 +6,6 @@ import (
 	"os"
 
 	"gitlab.com/uafrica/go-utils/api"
-	"gitlab.com/uafrica/go-utils/audit"
 	"gitlab.com/uafrica/go-utils/errors"
 	"gitlab.com/uafrica/go-utils/examples/core/app"
 	"gitlab.com/uafrica/go-utils/examples/core/db"
@@ -22,7 +21,6 @@ func main() {
 		WithCheck("maintenance", maint{}).
 		WithCheck("rate", rateLimiter{}).
 		WithCORS(cors{}).
-		WithAuditor(audit.File(os.Stdout)).
 		WithEvents(app.QueueRoutes()). //only used when LOG_LEVEL="debug"
 		Run()
 }
diff --git a/examples/core/app/users/users.go b/examples/core/app/users/users.go
index 93e094a2aaf803f1c3809d6bb00b17d747458fc6..a7dd6cbbc321211c2ed3908b93e8fd2cde62a8f2 100644
--- a/examples/core/app/users/users.go
+++ b/examples/core/app/users/users.go
@@ -11,7 +11,6 @@ import (
 	"gitlab.com/uafrica/go-utils/errors"
 	"gitlab.com/uafrica/go-utils/examples/core/email"
 	"gitlab.com/uafrica/go-utils/logger"
-	"gitlab.com/uafrica/go-utils/service"
 )
 
 type User struct {
@@ -113,7 +112,7 @@ func Add(ctx api.Context, params noParams, newUser POSTUser) (User, error) {
 		Subject: "Welcome User",
 		Body:    "Your account has been created",
 	}
-	/*eventID*/ _, err := service.NewEvent(ctx, "notify").RequestID(ctx.RequestID()).Type("email").Delay(time.Second * 5).Params(map[string]string{}).Send(email)
+	/*eventID*/ _, err := ctx.NewEvent("notify").RequestID(ctx.RequestID()).Type("email").Delay(time.Second * 5).Params(map[string]string{}).Send(email)
 	if err != nil {
 		ctx.Errorf("failed to notify: %+v", err)
 	}
diff --git a/examples/core/cron/main.go b/examples/core/cron/main.go
index f4f400ffcb1c19be1e908276c0ec8d373502942b..66a494b7907815e92449cb379723cda9703ed363 100644
--- a/examples/core/cron/main.go
+++ b/examples/core/cron/main.go
@@ -25,7 +25,6 @@ func main() {
 
 	cron.New(app.CronRoutes()).
 		WithStarter("db", db.Connector("core")).
-		//WithAuditor(audit{}).
 		Run(invokeArnPtr)
 }
 
diff --git a/examples/core/db/database.go b/examples/core/db/database.go
index 8021c42f85802f829e1640840ef425c057abcfe7..f694c23fcf4cd0f6ce7af2867bedf22484fa39f8 100644
--- a/examples/core/db/database.go
+++ b/examples/core/db/database.go
@@ -7,14 +7,14 @@ import (
 	"gitlab.com/uafrica/go-utils/service"
 )
 
-func Connector(dbName string) service.IStarter {
+func Connector(dbName string) service.Starter {
 	return &connector{
 		name: dbName,
 		conn: 0,
 	}
 }
 
-//connector implements service.IStarter
+//connector implements service.Starter
 type connector struct {
 	name string
 	conn int
diff --git a/examples/core/email/notify.go b/examples/core/email/notify.go
index 206f2be9ee540314b853eeb68843298a2233467b..07a1149cec6d7854b88dfaf4cc71a3a7f96aad4b 100644
--- a/examples/core/email/notify.go
+++ b/examples/core/email/notify.go
@@ -1,8 +1,6 @@
 package email
 
-import (
-	"gitlab.com/uafrica/go-utils/queues"
-)
+import "gitlab.com/uafrica/go-utils/service"
 
 type Message struct {
 	From    string
@@ -13,7 +11,7 @@ type Message struct {
 	Body    string
 }
 
-func Notify(ctx queues.Context, msg Message) error {
+func Notify(ctx service.Context, msg Message) error {
 	ctx.Debugf("Pretending to send email: %+v", msg)
 	return nil
 }
diff --git a/examples/core/sqs/main.go b/examples/core/sqs/main.go
index 7a019a7cf89b1d58996f51440fdd44cfa659c49f..276fb5123898b4de7d95293b308ea8c7083e4649 100644
--- a/examples/core/sqs/main.go
+++ b/examples/core/sqs/main.go
@@ -4,10 +4,10 @@ import (
 	"flag"
 
 	"gitlab.com/uafrica/go-utils/config"
+	"gitlab.com/uafrica/go-utils/consumer/sqs_consumer"
 	"gitlab.com/uafrica/go-utils/errors"
 	"gitlab.com/uafrica/go-utils/examples/core/db"
 	"gitlab.com/uafrica/go-utils/logger"
-	"gitlab.com/uafrica/go-utils/queues/sqs"
 )
 
 func main() {
@@ -16,7 +16,7 @@ func main() {
 
 	sqsRoutes := map[string]interface{}{}
 
-	consumer := sqs.NewConsumer("uafrica-request-id", sqsRoutes).
+	consumer := sqs_consumer.New("uafrica-request-id", sqsRoutes).
 		WithStarter("db", db.Connector("core"))
 
 	if reqFile != nil && *reqFile != "" {
diff --git a/logger/global.go b/logger/global.go
index c04253667b5c9dff7402c2d2a5e853fce8cd5478..ca25a0273c19b7227d0b5d4106cb02ecad9338e1 100644
--- a/logger/global.go
+++ b/logger/global.go
@@ -76,3 +76,11 @@ func Debugf(format string, args ...interface{}) {
 func Debug(args ...interface{}) {
 	globalLogger.log(LevelDebug, 1, fmt.Sprint(args...))
 }
+
+func Tracef(format string, args ...interface{}) {
+	globalLogger.log(LevelTrace, 1, fmt.Sprintf(format, args...))
+}
+
+func Trace(args ...interface{}) {
+	globalLogger.log(LevelTrace, 1, fmt.Sprint(args...))
+}
diff --git a/logger/level.go b/logger/level.go
index 3014994e7f9fa39006368bf8ad965ffb44e3ced9..2546da64324ae5f999ad05d10ac91970ba8b0096 100644
--- a/logger/level.go
+++ b/logger/level.go
@@ -16,6 +16,8 @@ func (level Level) String() string {
 		return "info"
 	case LevelDebug:
 		return "debug"
+	case LevelTrace:
+		return "trace"
 	}
 	return fmt.Sprintf("Level(%d)", level)
 }
@@ -30,4 +32,5 @@ const (
 	LevelWarn
 	LevelInfo
 	LevelDebug
+	LevelTrace
 )
diff --git a/logger/logger.go b/logger/logger.go
index 736846d0ec5117c1f6090c2bf3b2061c87d5a49f..6899dc3300b8a60bbcd33d11ccacc8c1941662de 100644
--- a/logger/logger.go
+++ b/logger/logger.go
@@ -20,6 +20,8 @@ type Logger interface {
 	Info(args ...interface{})
 	Debugf(format string, args ...interface{})
 	Debug(args ...interface{})
+	Tracef(format string, args ...interface{})
+	Trace(args ...interface{})
 
 	WithFields(data map[string]interface{}) logger
 }
@@ -87,6 +89,14 @@ func (l logger) Debug(args ...interface{}) {
 	l.log(LevelDebug, 1, fmt.Sprint(args...))
 }
 
+func (l logger) Tracef(format string, args ...interface{}) {
+	l.log(LevelTrace, 1, fmt.Sprintf(format, args...))
+}
+
+func (l logger) Trace(args ...interface{}) {
+	l.log(LevelTrace, 1, fmt.Sprint(args...))
+}
+
 func (l logger) log(level Level, skip int, msg string) {
 	if level <= l.level && l.writer != nil {
 		entry := Entry{
diff --git a/logs/README.md b/logs/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..75ec01279321db81b613de502bc76e92867d3974
--- /dev/null
+++ b/logs/README.md
@@ -0,0 +1,43 @@
+# Logs
+
+This package provides functions to log API, SQS and CRON events, capturing for example an API method and path along with request and response details, or SQS event details along with the time it spent in the queue before being processed.
+
+These logs are sent to a SQS queue. The handling of the queue event and capturing of the log, is done as SQS handlers, not as part of this package.
+
+Do not confuse this with logger, which writes free-format log entries to stdout.
+
+# API-LOGS
+
+At the end of each API handler, an api-log is captured to describe the incoming API request and response. Also part of this log is a list of actions taken during the handler, including:
+* API calls made
+* SQS Events sent 
+* Database Statements executed
+* Time slept
+
+Each of those has a start/end time and duration, and they are summed and it should add up to most of the API-Log total duration.
+If there is a big difference between the summed time and the total duration, then we are doing something that takes time that we are not monitoring which should be investigated.
+The total time spent sleeping, waiting for db calls, waiting for outgoing API calls, is logged in the API log.
+This can be logged and broken down per path and method to see where the API is spending most time, and that could be investigated and optimised to improve performance.
+
+# SQS-LOGS
+
+SQS logs are written at the end of a SQS event handler, similar to API logs.
+Since SQS is used to write API logs, those handlers should not be logged, as it will create a circular infinite queue ramping up huge bills.
+To be safe, SQS logs are therefore DISABLED by default.
+It should only be enabled for things like provider rate requests or any SQS handler that is part of the functionality of the system doing async work, not handlers that are part of the infrastructure.
+
+SQS log will also write to the API_LOGS queue and the same index in OpenSearch (can review and change this in future)
+It logs with method "SQS" and path is the message type.
+That means we can log durations and throughput in the same way and on the same graph as API when needed.
+It also captures the actions taken as part of the handler, in the same way it is captured for API.
+
+So when one finds some action takes too long in API and moves it to an SQS handler, the change will be visible on the dashboard, indicating whether or not it had the desired effect.
+
+That is the idea.
+
+We can easily disable SQS logs and we can easily move it to another index in OpenSearch if necessary. Will have to try it for a while and see if it is useful in the current form or not.
+
+# CRON-LOGS
+
+In the same way we log API/SQS, it will be useful to monitor crons with a bit of output, e.g. nr of items deleted by a janitor etc.
+One can get that currently from CloudWatch if the logs are not disabled. CloudWatch should ideally not be the source of metrics, but that is currently the case, so not changing it yet.
\ No newline at end of file
diff --git a/logs/action.go b/logs/action.go
new file mode 100644
index 0000000000000000000000000000000000000000..23f1a6176a8519e9e61b37a4fa67db217dc68d69
--- /dev/null
+++ b/logs/action.go
@@ -0,0 +1,321 @@
+package logs
+
+import (
+	"encoding/json"
+	"sync"
+	"time"
+)
+
+//Call LogOutgoingAPIRequest() after calling an API end-point as part of a handler,
+//to capture the details
+//and add it to the current handler log story for reporting/metrics
+func LogOutgoingAPIRequest(url string, method string, requestBody string, responseBody string, responseCode int, startTime time.Time) error {
+	endTime := time.Now()
+	log := ApiCallLog{
+		URL:          url,
+		Method:       method,
+		ResponseCode: responseCode,
+	}
+	if requestBody != "" {
+		log.Request = &BodyLog{
+			BodySize: len(requestBody),
+			Body:     requestBody,
+		}
+	}
+	if responseBody != "" {
+		log.Response = &BodyLog{
+			BodySize: len(responseBody),
+			Body:     responseBody,
+		}
+	}
+
+	actionListMutex.Lock()
+	actionList = append(actionList, ActionLog{
+		StartTime: startTime,
+		EndTime:   endTime,
+		DurMs:     endTime.Sub(startTime).Milliseconds(),
+		Type:      ActionTypeApiCall,
+		ApiCall:   &log,
+	})
+	actionListMutex.Unlock()
+
+	return nil
+} //LogOutgoingAPIRequest()
+
+//Call LogSQL() after executing any SQL query
+//to capture the details
+//and add it to the current handler log story for reporting/metrics
+func LogSQL(
+	startTime time.Time,
+	sql string,
+	rowsCount int, //optional nr of rows to report, else 0
+	ids []int64, //optional list of ids to report, else nil
+	err error, //only if failed, else nil
+) {
+	endTime := time.Now()
+	log := SQLQueryLog{
+		SQL:       sql,
+		RowCount:  rowsCount,
+		InsertIDs: ids,
+	}
+	if err != nil {
+		log.Error = err.Error()
+	}
+	actionListMutex.Lock()
+	actionList = append(actionList, ActionLog{
+		StartTime: startTime,
+		EndTime:   endTime,
+		DurMs:     endTime.Sub(startTime).Milliseconds(),
+		Type:      ActionTypeSqlQuery,
+		SQLQuery:  &log,
+	})
+	actionListMutex.Unlock()
+}
+
+//Call LogSQSSent() after sending an SQS event
+//to capture the details
+//and add it to the current handler log story for reporting/metrics
+func LogSQSSent(startTime time.Time, queueName string, messageType string, request interface{}) {
+	//do not log internal events sent to audit/api-log
+	if queueName == "API_LOGS" || queueName == "AUDIT" {
+		return
+	}
+
+	endTime := time.Now()
+	log := SQSSentLog{
+		QueueName:   queueName,
+		MessageType: messageType,
+	}
+	if request != nil {
+		if requestString, ok := request.(string); ok {
+			log.Request = &BodyLog{
+				BodySize: len(requestString), //do not marshal, else we have double escaped JSON
+				Body:     requestString,
+			}
+		} else {
+			jsonRequest, _ := json.Marshal(request)
+			log.Request = &BodyLog{
+				BodySize: len(jsonRequest),
+				Body:     string(jsonRequest),
+			}
+		}
+	}
+	actionListMutex.Lock()
+	actionList = append(actionList, ActionLog{
+		StartTime: startTime,
+		EndTime:   endTime,
+		DurMs:     endTime.Sub(startTime).Milliseconds(),
+		Type:      ActionTypeSqsSent,
+		SQSSent:   &log,
+	})
+	actionListMutex.Unlock()
+}
+
+func LogSearch(startTime time.Time, index string, query string) {
+	endTime := time.Now()
+	actionListMutex.Lock()
+	actionList = append(actionList, ActionLog{
+		StartTime: startTime,
+		EndTime:   endTime,
+		DurMs:     endTime.Sub(startTime).Milliseconds(),
+		Type:      ActionTypeSearch,
+		Search: &SearchLog{
+			Index: index,
+			Query: query,
+		},
+	})
+	actionListMutex.Unlock()
+}
+
+//Call LogSleep() after doing time.Sleep()
+//to capture the details
+//and add it to the current handler log story for reporting/metrics
+func LogSleep(startTime time.Time) {
+	endTime := time.Now()
+	actionListMutex.Lock()
+	actionList = append(actionList, ActionLog{
+		StartTime: startTime,
+		EndTime:   endTime,
+		DurMs:     endTime.Sub(startTime).Milliseconds(),
+		Type:      ActionTypeSleep,
+	})
+	actionListMutex.Unlock()
+}
+
+var (
+	actionListMutex sync.Mutex
+	actionList      = []ActionLog{}
+)
+
+type ActionLog struct {
+	StartTime time.Time    `json:"start_time"`
+	EndTime   time.Time    `json:"end_time"`
+	DurMs     int64        `json:"duration_ms"` //duration in milliseconds
+	Type      ActionType   `json:"type" doc:"Type is api-call|sqs-sent|sql-query|sleep"`
+	ApiCall   *ApiCallLog  `json:"api_call,omitempty"`
+	SQSSent   *SQSSentLog  `json:"sqs_sent,omitempty"`
+	SQLQuery  *SQLQueryLog `json:"sql_query,omitempty"`
+	Search    *SearchLog   `json:"search"`
+}
+
+func (action ActionLog) Relative(relTime time.Time) RelativeActionLog {
+	return RelativeActionLog{
+		StartMs:  action.StartTime.Sub(relTime).Milliseconds(),
+		EndMs:    action.EndTime.Sub(relTime).Milliseconds(),
+		DurMs:    action.DurMs,
+		Type:     action.Type,
+		ApiCall:  action.ApiCall,
+		SQSSent:  action.SQSSent,
+		SQLQuery: action.SQLQuery,
+	}
+}
+
+type RelativeActionLog struct {
+	StartMs  int64        `json:"start_ms" doc:"Start time in milliseconds after start timestamp"`
+	EndMs    int64        `json:"end_ms" doc:"End time in milliseconds after start timestamp"`
+	DurMs    int64        `json:"duration_ms"` //duration in milliseconds
+	Type     ActionType   `json:"type" doc:"Type is api-call|sqs-sent|sql-query|sleep" search:"keyword"`
+	ApiCall  *ApiCallLog  `json:"api_call,omitempty"`
+	SQSSent  *SQSSentLog  `json:"sqs_sent,omitempty"`
+	SQLQuery *SQLQueryLog `json:"sql_query,omitempty"`
+}
+
+type ActionType string
+
+var ActionTypeList = []ActionType{
+	ActionTypeNone,
+	ActionTypeApiCall,
+	ActionTypeSqsSent,
+	ActionTypeSqlQuery,
+	ActionTypeSearch,
+	ActionTypeSleep,
+}
+
+const (
+	ActionTypeNone     ActionType = "none"
+	ActionTypeApiCall  ActionType = "api-call"
+	ActionTypeSqsSent  ActionType = "sqs-sent"
+	ActionTypeSqlQuery ActionType = "sql-query"
+	ActionTypeSearch   ActionType = "search"
+	ActionTypeSleep    ActionType = "sleep"
+)
+
+//APICallLog captures details of an outgoing API call made from a handler
+type ApiCallLog struct {
+	URL          string   `json:"url" search:"keyword"`
+	Method       string   `json:"method" search:"keyword"`
+	ResponseCode int      `json:"response_code" search:"keyword"`
+	Request      *BodyLog `json:"request,omitempty"`
+	Response     *BodyLog `json:"response,omitempty"`
+}
+
+type BodyLog struct {
+	BodySize int    `json:"body_size"`
+	Body     string `json:"body"`
+}
+
+//SQSSentLog captures details of an SQS event sent from a handler
+type SQSSentLog struct {
+	QueueName   string   `json:"queue_name" search:"keyword"`
+	MessageType string   `json:"message_type" search:"keyword"`
+	Request     *BodyLog `json:"request,omitempty"`
+}
+
+//SQLQueryLog captures details of an SQL query executed from a handler resulting in either rows returned, ids inserted or an error
+type SQLQueryLog struct {
+	SQL       string  `json:"sql"`
+	RowCount  int     `json:"row_count,omitempty"`
+	InsertIDs []int64 `json:"insert_ids,omitempty"`
+	Error     string  `json:"error,omitempty"`
+}
+
+type SearchLog struct {
+	Index string `json:"index"`
+	Query string `json:"query"`
+}
+
+//compile the relative action list that took place during this handler
+//copy then reset actionList for the next handler
+//we copy it with relation to this API's start..end time, rather than full timestamps, which are hard to read in the list
+//start and end are current total handler period that actions should be inside that
+func relativeActionList(startTime, endTime time.Time) []RelativeActionLog {
+	actionListMutex.Lock()
+	defer func() {
+		//after copy/discard, reset (global!) action list for the next handler
+		actionList = []ActionLog{}
+		actionListMutex.Unlock()
+	}()
+
+	cfg := currentLogConfig()
+	if !cfg.ActionsKeep {
+		return nil
+	}
+
+	//todo: runtime config: load temporary from REDIS after N seconds
+	//which will allow us to monitor better for a short while during trouble shooting
+	//then something like this to reload every 5min (5min could also be part of config)
+	//	if dynamicExpireTime.Before(time.Now()) {
+	//		dynamicApiConfig := apiConfig //loaded from env at startup
+	//		...look for keys in REDIS and override ...
+	//		dynamicExpireTime = time.Now().Add(time.Minute*5)
+	//	}
+	//	do this in go routing with sleep... so handlers do not have to check :-)
+	//	and it can also be used for api-log part that is not action list, e.g. for api-log req/res body len etc...
+	relActionList := []RelativeActionLog{}
+	for _, action := range actionList {
+		if action.EndTime.Before(startTime) || action.StartTime.After(endTime) {
+			continue //not expected - skip actions outside log window
+		}
+
+		//apply reduction filters to limit string lengths
+		switch action.Type {
+		case ActionTypeNone:
+		case ActionTypeSqlQuery:
+			if action.SQLQuery != nil && len(action.SQLQuery.SQL) > int(cfg.ActionsMaxSQLLength) {
+				action.SQLQuery.SQL = action.SQLQuery.SQL[:cfg.ActionsMaxSQLLength]
+			}
+		case ActionTypeSqsSent:
+			if action.SQSSent != nil && action.SQSSent.Request != nil && len(action.SQSSent.Request.Body) > int(cfg.ActionsMaxSQSReqBodyLength) {
+				action.SQSSent.Request.Body = action.SQSSent.Request.Body[:cfg.ActionsMaxSQSReqBodyLength]
+			}
+		case ActionTypeApiCall:
+			if action.ApiCall != nil {
+				if action.ApiCall.Request != nil && len(action.ApiCall.Request.Body) > int(cfg.ActionsMaxAPIReqBodyLength) {
+					action.ApiCall.Request.Body = action.ApiCall.Request.Body[:cfg.ActionsMaxAPIReqBodyLength]
+				}
+				if action.ApiCall.Response != nil && len(action.ApiCall.Response.Body) > int(cfg.ActionsMaxAPIResBodyLength) {
+					action.ApiCall.Response.Body = action.ApiCall.Response.Body[:cfg.ActionsMaxAPIResBodyLength]
+				}
+			}
+		case ActionTypeSearch:
+			if action.Search != nil {
+				if len(action.Search.Query) > int(cfg.ActionsMaxSearchQueryLength) {
+					action.Search.Query = action.Search.Query[:cfg.ActionsMaxSearchQueryLength]
+				}
+			}
+		}
+
+		//make relative and append to the list
+		relActionList = append(relActionList, action.Relative(startTime))
+	}
+
+	//also append to the list any nonAction periods greater than thresholdMs
+	//to indicate significant gaps in the action list that we did not account for
+	thresholdMs := int64(50)
+	//make period list, remove all action periods, then we're left with non-action periods :-)
+	nonActionPeriods := NewPeriods(startTime, endTime)
+	for _, action := range actionList {
+		nonActionPeriods = nonActionPeriods.Without(Period{Start: action.StartTime, End: action.EndTime})
+	}
+	for _, nonAction := range nonActionPeriods {
+		if nonAction.Duration().Milliseconds() > thresholdMs {
+			relActionList = append(relActionList, ActionLog{
+				StartTime: nonAction.Start,
+				EndTime:   nonAction.End,
+				DurMs:     nonAction.Duration().Milliseconds(),
+				Type:      ActionTypeNone,
+			}.Relative(startTime))
+		}
+	}
+	return relActionList
+}
diff --git a/logs/api-logs.go b/logs/api-logs.go
new file mode 100644
index 0000000000000000000000000000000000000000..da4c1b5ab9a192cb688f5f275f2a9a55f899132b
--- /dev/null
+++ b/logs/api-logs.go
@@ -0,0 +1,151 @@
+package logs
+
+import (
+	"net/http"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-lambda-go/events"
+	"gitlab.com/uafrica/go-utils/errors"
+	"gitlab.com/uafrica/go-utils/logger"
+	"gitlab.com/uafrica/go-utils/queues"
+)
+
+var producer queues.Producer
+
+func Init(p queues.Producer) {
+	producer = p
+}
+
+//Call this at the end of an API request handler to capture the req/res as well as all actions taken during the processing
+//(note: action list is only reset when this is called - so must be called after each handler, else action list has to be reset at the start)
+func LogIncomingAPIRequest(startTime time.Time, requestID string, claim map[string]interface{}, req events.APIGatewayProxyRequest, res events.APIGatewayProxyResponse) error {
+	if producer == nil {
+		return errors.Errorf("logs queue producer not set")
+	}
+
+	//todo: filter out some noisy (method+path)
+	logger.Debugf("claim: %+v", claim)
+
+	endTime := time.Now()
+
+	var authType string
+	var authUsername string
+	if req.RequestContext.Identity.CognitoAuthenticationType != "" {
+		authType = "cognito"
+		split := strings.Split(req.RequestContext.Identity.CognitoAuthenticationProvider, ":")
+		if len(split) > 0 {
+			authUsername = split[len(split)-1] //= part after last ':'
+		}
+	} else {
+		authType = "iam"
+		split := strings.Split(req.RequestContext.Identity.UserArn, ":user/")
+		if len(split) > 0 {
+			authUsername = split[len(split)-1] //= part after ':user/'
+		}
+	}
+
+	username, _ := claim["Username"].(string)
+	accountID, _ := claim["AccountID"].(int64)
+	if accountID == 0 {
+		if accountIDParam, ok := req.QueryStringParameters["account_id"]; ok {
+			if i64, err := strconv.ParseInt(accountIDParam, 10, 64); err == nil && i64 > 0 {
+				accountID = i64
+			}
+		}
+	}
+	apiLog := ApiLog{
+		StartTime:           startTime,
+		EndTime:             endTime,
+		DurMs:               endTime.Sub(startTime).Milliseconds(),
+		Method:              req.HTTPMethod,
+		Address:             req.RequestContext.DomainName,
+		Path:                req.Path,
+		ResponseCode:        res.StatusCode,
+		RequestID:           requestID,
+		InitialAuthType:     authType,
+		InitialAuthUsername: authUsername,
+		SourceIP:            req.RequestContext.Identity.SourceIP,
+		UserAgent:           req.RequestContext.Identity.UserAgent,
+		Username:            username,
+		AccountID:           accountID,
+		Request: ApiLogRequest{
+			Headers:         req.Headers,
+			QueryParameters: req.QueryStringParameters,
+			BodySize:        len(req.Body),
+			Body:            req.Body,
+		},
+		Response: ApiLogResponse{
+			Headers:  res.Headers,
+			BodySize: len(res.Body),
+			Body:     res.Body,
+		},
+		Actions: nil,
+	}
+
+	//compile action list
+	apiLog.Actions = relativeActionList(apiLog.StartTime, apiLog.EndTime)
+
+	//sort action list on startTime, cause actions are added when they end, i.e. ordered by end time
+	//and all non-actions were appended at the end of the list
+	sort.Slice(apiLog.Actions, func(i, j int) bool { return apiLog.Actions[i].StartMs < apiLog.Actions[j].StartMs })
+
+	//also copy multi-value query parameters to the log as CSV array values
+	for n, as := range req.MultiValueQueryStringParameters {
+		apiLog.Request.QueryParameters[n] = "[" + strings.Join(as, ",") + "]"
+	}
+
+	//todo: filter out excessive req/res body content per (method+path)
+	//todo: also need to do for all actions...
+	if apiLog.Method == http.MethodGet {
+		apiLog.Response.Body = "<not logged>"
+	}
+
+	logger.Debugf("Send api-log to SQS: %+v", apiLog)
+
+	//todo: filter out sensitive values (e.g. OTP)
+	if _, err := producer.NewEvent("API_LOGS").
+		Type("api-log").
+		RequestID(apiLog.RequestID).
+		Send(apiLog); err != nil {
+		return errors.Wrapf(err, "failed to send api-log")
+	}
+	return nil
+} //LogIncomingAPIRequest()
+
+//ApiLog is the SQS event details struct encoded as JSON document, sent to SQS, to be logged for each API handler executed.
+type ApiLog struct {
+	StartTime           time.Time           `json:"start_time"`
+	EndTime             time.Time           `json:"end_time"`
+	DurMs               int64               `json:"duration_ms"` //duration in milliseconds
+	Method              string              `json:"method"`
+	Address             string              `json:"address"` //server address for incoming and outgoing
+	Path                string              `json:"path"`
+	ResponseCode        int                 `json:"response_code"`
+	RequestID           string              `json:"request_id"`
+	InitialAuthUsername string              `json:"initial_auth_username,omitempty"`
+	InitialAuthType     string              `json:"initial_auth_type,omitempty"`
+	AccountID           int64               `json:"account_id,omitempty"`
+	Username            string              `json:"username,omitempty"`
+	SourceIP            string              `json:"source_ip,omitempty"`  //only logged for incoming API
+	UserAgent           string              `json:"user_agent,omitempty"` //only for incoming, indicate type of browser when UI
+	RelevantID          string              `json:"relevant_id,omitempty"`
+	Request             ApiLogRequest       `json:"request"`
+	Response            ApiLogResponse      `json:"response"`
+	Actions             []RelativeActionLog `json:"actions,omitempty"`
+}
+
+type ApiLogRequest struct {
+	Headers         map[string]string `json:"headers,omitempty"`
+	QueryParameters map[string]string `json:"query_parameters,omitempty"`
+	BodySize        int               `json:"body_size" search:"long"` //set even when body is truncated/omitted
+	Body            string            `json:"body,omitempty"`          //json body as a string
+}
+
+type ApiLogResponse struct {
+	Headers  map[string]string `json:"headers,omitempty"`
+	BodySize int               `json:"body_size"`      //set even when body is truncated/omitted
+	Body     string            `json:"body,omitempty"` //json content as a string
+}
diff --git a/logs/config.go b/logs/config.go
new file mode 100644
index 0000000000000000000000000000000000000000..f969464855300c071902cdc9d0db5c88e4e31a8c
--- /dev/null
+++ b/logs/config.go
@@ -0,0 +1,62 @@
+package logs
+
+import (
+	"context"
+	"time"
+
+	"gitlab.com/uafrica/go-utils/config"
+	"gitlab.com/uafrica/go-utils/logger"
+	"gitlab.com/uafrica/go-utils/redis"
+)
+
+type Config struct {
+	ActionsKeep                 bool  `json:"actions_keep" doc:"Set true to keep list of actions in logs"`
+	ActionsMaxSQLLength         int64 `json:"actions_max_sql_length" doc:"Set length of SQL query to keep in action list (default 0 = delete)"`
+	ActionsMaxSQSReqBodyLength  int64 `json:"actions_max_sqs_req_body_length" doc:"Set length of SQS Request body to keep in action list (default 0 = delete)"`
+	ActionsMaxAPIReqBodyLength  int64 `json:"actions_max_api_req_body_length" doc:"Set length of API Request body to keep in action list (default 0 = delete)"`
+	ActionsMaxAPIResBodyLength  int64 `json:"actions_max_api_res_body_length" doc:"Set length of API Response body to keep in action list (default 0 = delete)"`
+	ActionsMaxSearchQueryLength int64 `json:"actions_max_search_query_length" doc:"Set length of search query to keep in action list (default 0 = delete)"`
+}
+
+const configPrefix = "LOGS"
+
+var (
+	logConfig         Config
+	dynamicLogConfig  Config
+	dynamicExpireTime time.Time
+	redisCli          redis.IRedis
+)
+
+func init() {
+	if err := config.LoadEnv(configPrefix, &logConfig); err != nil {
+		logger.Errorf("failed to load LOGS config: %+v", err)
+	}
+	dynamicLogConfig = logConfig
+	dynamicExpireTime = time.Now()
+
+	//see if can load overrides from redis
+	var err error
+	redisCli, err = redis.New(context.Background())
+	if err != nil {
+		logger.Errorf("Not able to connect to REDIS for runtime %s config: %+v", configPrefix, err)
+	}
+}
+
+//todo: call only on each use and check expiry time before reading from REDIS again, e.g. reload no faster that 10s
+func currentLogConfig() Config {
+	if redisCli == nil || dynamicExpireTime.After(time.Now()) {
+		return dynamicLogConfig
+	}
+
+	//time to attempt reload
+	//copy static config then overload values which are defined from REDIS
+	dynamicLogConfig = logConfig
+	dynamicExpireTime = time.Now().Add(time.Second * 10)
+
+	if err := config.Load(configPrefix, &dynamicLogConfig, redisCli); err != nil {
+		logger.Errorf("failed to load %s config from REDIS", configPrefix)
+	} else {
+		logger.Debugf("Loaded %s config: %+v", configPrefix, dynamicLogConfig)
+	}
+	return dynamicLogConfig
+} //runtimeConfigLoad
diff --git a/logs/cron.logs.go b/logs/cron.logs.go
new file mode 100644
index 0000000000000000000000000000000000000000..1f58fd5265d3e22a565749d75fac4d43fce58c90
--- /dev/null
+++ b/logs/cron.logs.go
@@ -0,0 +1,3 @@
+package logs
+
+//todo... currently monitored from CloudWatch...
diff --git a/logs/periods.go b/logs/periods.go
new file mode 100644
index 0000000000000000000000000000000000000000..536727df91ebfff8deb69a21826ad00f17ded2cf
--- /dev/null
+++ b/logs/periods.go
@@ -0,0 +1,103 @@
+package logs
+
+import (
+	"time"
+)
+
+type Period struct {
+	Start time.Time `json:"start_time"`
+	End   time.Time `json:"end_time"`
+}
+
+func (p Period) Duration() time.Duration {
+	return p.End.Sub(p.Start)
+}
+
+type Periods []Period
+
+func NewPeriods(start time.Time, end time.Time) Periods {
+	if end.Before(start) {
+		return []Period{}
+	}
+	return []Period{{Start: start, End: end}}
+}
+
+func (ps Periods) Without(p Period) Periods {
+	if len(ps) == 0 {
+		return ps //nothing left to take from
+	}
+	if p.End.Before(ps[0].Start) {
+		return ps //before first period
+	}
+	if p.Start.After(ps[len(ps)-1].End) {
+		return ps //after last period
+	}
+
+	//logger.Debugf("Start: %+v", ps)
+	nextIndex := 0
+	for nextIndex < len(ps) && ps[nextIndex].End.Before(p.Start) {
+		//logger.Debugf("skip[%d]: %s > %s", nextIndex, p.Start, ps[nextIndex].End)
+		nextIndex++
+	}
+	toDelete := []int{}
+	for nextIndex < len(ps) && ps[nextIndex].End.Before(p.End) {
+		if ps[nextIndex].Start.Before(p.Start) {
+			//trim tail
+			//logger.Debugf("tail[%d] %s->%s", nextIndex, ps[nextIndex].End, p.Start)
+			ps[nextIndex].End = p.Start
+		} else {
+			//delete this period completely and move to next
+			toDelete = append(toDelete, nextIndex)
+			//logger.Debugf("delete[%d] %s..%s", nextIndex, ps[nextIndex].Start, ps[nextIndex].End)
+		}
+		nextIndex++
+	}
+	if nextIndex < len(ps) && ps[nextIndex].End.After(p.End) {
+		if ps[nextIndex].Start.Before(p.Start) {
+			//remove part of this period
+			ps = append(ps, Period{Start: p.End, End: ps[nextIndex].End})
+			ps[nextIndex].End = p.Start
+			//logger.Debugf("split[%d]", nextIndex)
+		} else {
+			if ps[nextIndex].Start.Before(p.End) {
+				//trim head of period to start after removed peroid, then stop
+				//logger.Debugf("head[%d] %s->%s", nextIndex, ps[nextIndex].Start, p.End)
+				ps[nextIndex].Start = p.End
+			}
+		}
+	}
+
+	//delete selected periods completely
+	newPS := []Period{}
+	for i, p := range ps {
+		if len(toDelete) > 0 && i == toDelete[0] {
+			toDelete = toDelete[1:]
+		} else {
+			newPS = append(newPS, p)
+		}
+	}
+	//logger.Debugf("final: %+v", newPS)
+	return newPS
+}
+
+//Span is (last.end - first.start)
+func (ps Periods) Span() time.Duration {
+	if len(ps) > 0 {
+		return ps[len(ps)-1].End.Sub(ps[0].Start)
+	}
+	return time.Duration(0)
+}
+
+//Duration is sum of all period durations
+func (ps Periods) Duration() time.Duration {
+	dur := time.Duration(0)
+	for _, p := range ps {
+		dur += p.Duration()
+	}
+	return dur
+}
+
+//Gaps is (Span - Duration), i.e. time between periods
+func (ps Periods) Gaps() time.Duration {
+	return ps.Span() - ps.Duration()
+}
diff --git a/logs/periods_test.go b/logs/periods_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..e8804248aa15cd348d13b3077c259b35d330709b
--- /dev/null
+++ b/logs/periods_test.go
@@ -0,0 +1,59 @@
+package logs_test
+
+import (
+	"testing"
+	"time"
+
+	"gitlab.com/uafrica/go-utils/logger"
+	"gitlab.com/uafrica/go-utils/logs"
+)
+
+func TestPeriods(t *testing.T) {
+	logger.SetGlobalFormat(logger.NewConsole())
+	logger.SetGlobalLevel(logger.LevelDebug)
+	t0 := time.Date(2021, 01, 01, 0, 0, 0, 0, time.Now().Location())
+	ps := logs.NewPeriods(t0, t0.Add(time.Hour))
+	t.Log(ps)
+	//ps: 0..60
+
+	//split[0]
+	ps1 := ps.Without(logs.Period{Start: t0.Add(time.Minute * 5), End: t0.Add(time.Minute * 10)})
+	t.Log(ps1)
+	//-(5..10) -> ps1: 0..5, 10..60
+
+	//split[1]
+	ps2 := ps1.Without(logs.Period{Start: t0.Add(time.Minute * 15), End: t0.Add(time.Minute * 20)})
+	t.Log(ps2)
+	//-(15..20) -> ps1: 0..5, 10..15, 20..60
+
+	//trim head[2]
+	ps3 := ps2.Without(logs.Period{Start: t0.Add(time.Minute * 18), End: t0.Add(time.Minute * 21)})
+	t.Log(ps3)
+	//-(18..21) -> ps1: 0..5, 10..15, 21..60
+
+	//trim tail[1]
+	ps4 := ps3.Without(logs.Period{Start: t0.Add(time.Minute * 14), End: t0.Add(time.Minute * 19)})
+	t.Log(ps4)
+	//-(14..19) -> ps1: 0..5, 10..14, 21..60
+
+	//tail, delete, head
+	ps5 := ps4.Without(logs.Period{Start: t0.Add(time.Minute * 4), End: t0.Add(time.Minute * 22)})
+	t.Log(ps5)
+	//-(4..22) -> ps1: 0..4, 22..60
+
+	//over start
+	ps6 := ps5.Without(logs.Period{Start: t0.Add(-time.Minute * 1), End: t0.Add(time.Minute * 2)})
+	t.Log(ps6)
+	//-(-1..2) -> ps1: 2..4, 22..60
+
+	//over end
+	ps7 := ps6.Without(logs.Period{Start: t0.Add(time.Minute * 50), End: t0.Add(time.Minute * 120)})
+	t.Log(ps7)
+	//-(50..120) -> ps1: 2..4, 22..50
+
+	//all
+	ps8 := ps7.Without(logs.Period{Start: t0.Add(time.Minute * 0), End: t0.Add(time.Minute * 120)})
+	t.Log(ps8)
+	//-(0..120) -> ps1: nil
+
+}
diff --git a/logs/sqs-logs.go b/logs/sqs-logs.go
new file mode 100644
index 0000000000000000000000000000000000000000..22f45d21f13ec945238fac83e263c325adf8c1de
--- /dev/null
+++ b/logs/sqs-logs.go
@@ -0,0 +1,97 @@
+package logs
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"time"
+
+	"gitlab.com/uafrica/go-utils/errors"
+	"gitlab.com/uafrica/go-utils/logger"
+)
+
+//Call this at the end of an SQS event handler to capture the req and result as well as all actions taken during the processing
+//(note: action list is only reset when this is called - so must be called after each handler, else action list has to be reset at the start)
+func LogSQSRequest(startTime time.Time,
+	requestID string, //from API
+	messageType string,
+	req interface{},
+	handlerErr error,
+) error {
+	if producer == nil {
+		return errors.Errorf("logs queue producer not set")
+	}
+
+	if !sqsLogEnabled {
+		return nil
+	}
+
+	endTime := time.Now()
+	log := ApiLog{
+		StartTime: startTime,
+		EndTime:   endTime,
+		DurMs:     endTime.Sub(startTime).Milliseconds(),
+		RequestID: requestID,
+		Method:    "SQS",
+		Path:      messageType,
+	}
+
+	if req != nil {
+		if reqString, ok := req.(string); ok {
+			log.Request.Body = reqString //do not marshal else we have double-escaped JSON
+			log.Request.BodySize = len(reqString)
+		} else {
+			if jsonReq, err := json.Marshal(req); err == nil {
+				log.Request.Body = string(jsonReq)
+				log.Request.BodySize = len(log.Request.Body)
+			}
+		}
+	}
+
+	if handlerErr == nil {
+		log.ResponseCode = 0
+	} else {
+		log.ResponseCode = 1
+		errorInfo := ErrorInfo{
+			Error:   handlerErr.Error(),
+			Details: fmt.Sprintf("%+v", handlerErr),
+		}
+		jsonError, _ := json.Marshal(errorInfo)
+		log.Response.Body = string(jsonError)
+	}
+
+	//reset actionList for the next handler (NOTE(review): actions are discarded here, not copied into the log - confirm intended)
+	actionListMutex.Lock()
+	actionList = []ActionLog{}
+	actionListMutex.Unlock()
+
+	//todo: filter out sensitive values (e.g. OTP)
+
+	//note: we send SQS logs to "API_LOGS" which already exists... should be renamed to simply "LOGS"
+	//it uses the same structure, but method="SQS" and path="messageType" and request is the event body
+	//so they can be plotted on the same dashboard visualisation in OpenSearch with all the same filters/metrics
+	if _, err := producer.NewEvent("API_LOGS").
+		Type("api-log").
+		RequestID(requestID).
+		Send(log); err != nil {
+		return errors.Wrapf(err, "failed to send api-log for SQS")
+	}
+	return nil
+}
+
+var sqsLogEnabled = false
+
+func init() {
+	envSetting := os.Getenv("SQS_LOGS_ENABLED")
+	if envSetting == "true" {
+		sqsLogEnabled = true
+	}
+	//if consuming from API_LOGS, do not enable else we will consume and send to our own queue!
+	//log at startup so the effective setting is visible in every environment
+	logger.Infof("Environment SQS_LOGS_ENABLED=\"%s\" -> sqsLogEnabled=%v", envSetting, sqsLogEnabled)
+}
+
+type ErrorInfo struct {
+	Error   string `json:"error"`
+	Details string `json:"details"`
+}
diff --git a/queues/README.md b/queues/README.md
index c592a569ecf9057956dbb2edfe8d71bba8b0f806..1ac12125d6ef156d35320068073c366eb26c069b 100644
--- a/queues/README.md
+++ b/queues/README.md
@@ -59,10 +59,10 @@ func MyHandler(ctx queues.Context, body MyBody) (err error) {
 Notes:
 * The route name is the name specified in
 
-    ```service.NewEvent(...).Type(<name>)```
+    ```ctx.NewEvent(...).Type(<name>)```
 * The ```body``` should have the same type you used elsewhere in
 
-    ```service.NewEvent(...).Send(<type>)```
+    ```ctx.NewEvent(...).Send(<type>)```
 
 * The optional body type Validate() method will be called and must return nil before the handler will be called.
 
diff --git a/queues/audit.go b/queues/audit.go
deleted file mode 100644
index 16d50098946c8aecc76aa2b26bffb9011e13fdda..0000000000000000000000000000000000000000
--- a/queues/audit.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package queues
-
-import (
-	"time"
-
-	"gitlab.com/uafrica/go-utils/audit"
-	"gitlab.com/uafrica/go-utils/errors"
-	"gitlab.com/uafrica/go-utils/service"
-)
-
-//create auditor that push to a queue using the specified producer
-func Auditor(queueName string, messageType string, producer service.ProducerLogger) audit.Auditor {
-	if producer == nil {
-		panic(errors.Errorf("cannot create auditor with producer=nil"))
-	}
-	if queueName == "" {
-		queueName = "AUDIT"
-	}
-	if messageType == "" {
-		messageType = "audit"
-	}
-	return auditor{
-		producer:    producer,
-		queueName:   queueName,
-		messageType: messageType,
-	}
-}
-
-type auditor struct {
-	producer    service.ProducerLogger
-	queueName   string
-	messageType string
-}
-
-func (a auditor) WriteEvent(requestID string, event audit.Event) error {
-	_, err := service.NewEvent(a.producer, a.queueName).
-		RequestID(requestID).
-		Type(a.messageType).
-		Send(event)
-	if err != nil {
-		return errors.Wrapf(err, "failed to write audit event")
-	}
-	return nil
-}
-
-func (a auditor) WriteValues(startTime, endTime time.Time, requestID string, values map[string]interface{}) error {
-	_, err := service.NewEvent(a.producer, a.queueName).
-		RequestID(requestID).
-		Type(a.messageType).
-		Send(values)
-	if err != nil {
-		return errors.Wrapf(err, "failed to write audit values")
-	}
-	return nil
-}
diff --git a/queues/check.go b/queues/check.go
deleted file mode 100644
index f0148eef81b12737ba3b85d1082f0ecf99711956..0000000000000000000000000000000000000000
--- a/queues/check.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package queues
-
-type ICheck interface {
-	Check(Context) (interface{}, error)
-}
diff --git a/service/event.go b/queues/event.go
similarity index 74%
rename from service/event.go
rename to queues/event.go
index 08f6c98c6d78bcbf9471c68ac79b36e76a3a5d59..18fca85535378b4134d61d70db8bd3af00d4ca2f 100644
--- a/service/event.go
+++ b/queues/event.go
@@ -1,4 +1,4 @@
-package service
+package queues
 
 import (
 	"encoding/json"
@@ -9,12 +9,7 @@ import (
 	"gitlab.com/uafrica/go-utils/logger"
 )
 
-type ProducerLogger interface {
-	Producer
-	logger.Logger
-}
-
-func NewEvent(producer ProducerLogger, queueName string) Event {
+func NewEvent(producer Producer, queueName string) Event {
 	if producer == nil {
 		panic(errors.Errorf("NewEvent(producer=nil)"))
 	}
@@ -31,7 +26,7 @@ func NewEvent(producer ProducerLogger, queueName string) Event {
 }
 
 type Event struct {
-	producer       ProducerLogger
+	producer       Producer
 	MessageID      string            //assigned by implementation (AWS/mem/..)
 	QueueName      string            //queue determine sequencing, items in same queue are delivered one-after-the-other, other queues may deliver concurrent to this queue
 	TypeName       string            //type determines which handler processes the event
@@ -81,28 +76,37 @@ func (event Event) Params(params map[string]string) Event {
 	return event
 }
 
+var log = logger.New()
+
 func (event Event) Send(value interface{}) (string, error) {
 	if event.producer == nil {
 		return "", errors.Errorf("send with producer==nil")
 	}
 	if value != nil {
-		jsonBody, err := json.Marshal(value)
-		if err != nil {
-			return "", errors.Wrapf(err, "failed to JSON encode event body")
+		if valueString, ok := value.(string); ok {
+			event.BodyJSON = valueString
+		} else {
+			jsonBody, err := json.Marshal(value)
+			if err != nil {
+				return "", errors.Wrapf(err, "failed to JSON encode event body")
+			}
+			event.BodyJSON = string(jsonBody)
 		}
-		event.BodyJSON = string(jsonBody)
 	}
 	if msgID, err := event.producer.Send(event); err != nil {
 		return "", errors.Wrapf(err, "failed to send event")
 	} else {
-		event.producer.WithFields(map[string]interface{}{
-			"queue":  event.QueueName,
-			"type":   event.TypeName,
-			"due":    event.DueTime,
-			"params": event.ParamValues,
-			"body":   event.BodyJSON,
-			"msg_id": msgID,
-		}).Info("Sent event")
+		//do not log when we send to internal AUDIT/API_LOGS
+		if event.QueueName != "AUDIT" && event.QueueName != "API_LOGS" {
+			log.WithFields(map[string]interface{}{
+				"queue":  event.QueueName,
+				"type":   event.TypeName,
+				"due":    event.DueTime,
+				"params": event.ParamValues,
+				"body":   event.BodyJSON,
+				"msg_id": msgID,
+			}).Info("Sent event")
+		}
 		return msgID, nil
 	}
 }
diff --git a/service/producer.go b/queues/producer.go
similarity index 55%
rename from service/producer.go
rename to queues/producer.go
index 050c56f8134cd77432b3383e827d163933f60aa8..00df1db12444c7c33d67c9ca08861bbba1f022b8 100644
--- a/service/producer.go
+++ b/queues/producer.go
@@ -1,7 +1,7 @@
-package service
+package queues
 
 //Producer sends an event for async processing
 type Producer interface {
+	NewEvent(queueName string) Event
 	Send(event Event) (msgID string, err error)
-	//todo: method to request an event after some delay with incrementing attempt nr
 }
diff --git a/queues/sqs/README.md b/queues/sqs_producer/README.md
similarity index 100%
rename from queues/sqs/README.md
rename to queues/sqs_producer/README.md
diff --git a/queues/sqs/producer.go b/queues/sqs_producer/producer.go
similarity index 86%
rename from queues/sqs/producer.go
rename to queues/sqs_producer/producer.go
index ce10639f7eef631b83195b07d2a0a582e25a3995..649f6dbad7a998c07bb31a4c53a816ca51dfa08c 100644
--- a/queues/sqs/producer.go
+++ b/queues/sqs_producer/producer.go
@@ -1,4 +1,4 @@
-package sqs
+package sqs_producer
 
 import (
 	"os"
@@ -11,10 +11,11 @@ import (
 	"github.com/aws/aws-sdk-go/service/sqs"
 	"gitlab.com/uafrica/go-utils/errors"
 	"gitlab.com/uafrica/go-utils/logger"
-	"gitlab.com/uafrica/go-utils/service"
+	"gitlab.com/uafrica/go-utils/logs"
+	"gitlab.com/uafrica/go-utils/queues"
 )
 
-func NewProducer(requestIDHeaderKey string) service.Producer {
+func New(requestIDHeaderKey string) queues.Producer {
 	region := os.Getenv("AWS_REGION")
 	if region == "" {
 		panic(errors.Errorf("environment AWS_REGION is not defined"))
@@ -39,8 +40,12 @@ type producer struct {
 	queues             map[string]*QueueProducer
 }
 
+func (producer *producer) NewEvent(queueName string) queues.Event {
+	return queues.NewEvent(producer, queueName)
+}
+
 // Note: Calling code needs SQS IAM permissions
-func (producer *producer) Send(event service.Event) (string, error) {
+func (producer *producer) Send(event queues.Event) (string, error) {
 	logger.Debugf("SQS producer.Send(%+v)", event)
 	messenger, ok := producer.queues[event.QueueName]
 	if !ok {
@@ -89,8 +94,11 @@ type QueueProducer struct {
 	queueURL string
 }
 
-func (m *QueueProducer) Send(event service.Event) (string, error) {
-	logger.Debugf("SQS producer.queue(%s) Sending event %+v", m.queueURL, event)
+func (m *QueueProducer) Send(event queues.Event) (string, error) {
+	startTime := time.Now()
+	defer func() {
+		logs.LogSQSSent(startTime, event.QueueName, event.TypeName, event.BodyJSON)
+	}()
 
 	//add params as message attributes
 	msgAttrs := make(map[string]*sqs.MessageAttributeValue)
diff --git a/redis/redis.go b/redis/redis.go
index 23d667e5a1b39bed142a5409419b364f5617f1f9..5dac4995280bbb7ac8ac91bbedebffd2f22dd850 100644
--- a/redis/redis.go
+++ b/redis/redis.go
@@ -10,9 +10,11 @@ import (
 	"github.com/go-redis/redis/v8"
 	"gitlab.com/uafrica/go-utils/errors"
 	"gitlab.com/uafrica/go-utils/logger"
+	"gitlab.com/uafrica/go-utils/string_utils"
 )
 
 type IRedis interface {
+	string_utils.KeyReader
 	Del(key string) error
 	SetJSON(key string, value interface{}) error
 	SetJSONIndefinitely(key string, value interface{}) error
@@ -21,7 +23,6 @@ type IRedis interface {
 	SetString(key string, value string) error
 	SetStringIndefinitely(key string, value string) error
 	SetStringForDur(key string, value string, dur time.Duration) error
-	GetString(key string) (value string, ok bool)
 }
 
 type redisWithContext struct {
@@ -67,14 +68,18 @@ func (r redisWithContext) SetJSONForDur(key string, value interface{}, dur time.
 	if r.client == nil {
 		return errors.Errorf("REDIS disabled: cannot set JSON key(%s) = (%T)%v", key, value, value)
 	}
-	jsonBytes, err := json.Marshal(value)
-	if err != nil {
-		return errors.Wrapf(err, "failed to JSON encode key(%s) = (%T)", key, value)
+	valueStr, ok := value.(string)
+	if !ok {
+		jsonBytes, err := json.Marshal(value)
+		if err != nil {
+			return errors.Wrapf(err, "failed to JSON encode key(%s) = (%T)", key, value)
+		}
+		valueStr = string(jsonBytes)
 	}
-	if _, err = r.client.Set(r.Context, key, string(jsonBytes), dur).Result(); err != nil {
+	if _, err := r.client.Set(r.Context, key, valueStr, dur).Result(); err != nil {
 		return errors.Wrapf(err, "failed to set JSON key(%s)", key)
 	}
-	logger.Debugf("REDIS.SetJSON(%s)=%s (%T) (exp: %v)", key, string(jsonBytes), value, dur)
+	logger.Debugf("REDIS.SetJSON(%s)=%s (%T) (exp: %v)", key, valueStr, value, dur)
 	return nil
 }
 
@@ -130,6 +135,23 @@ func (r redisWithContext) GetString(key string) (string, bool) {
 	return value, true
 }
 
+func (r redisWithContext) Keys(prefix string) []string {
+	if r.client == nil {
+		return nil
+	}
+	value, err := r.client.Keys(r.Context, prefix+"*").Result() //note: KEYS is O(N) and blocks redis - consider SCAN for large keyspaces
+	if err != nil {
+		if err != redis.Nil { /* actual error */
+			logger.Errorf("Error fetching redis keys(%s*): %+v", prefix, err)
+		} else { /* redis.Nil = no keys match - not an error */
+			logger.Debugf("Keys(%s*): no matches", prefix)
+		}
+		return nil //no matches
+	}
+	logger.Debugf("Keys(%s): %+v", prefix, value)
+	return value
+}
+
 //global connection to REDIS used in all context
 var globalClient *redis.Client
 
diff --git a/search/document_store.go b/search/document_store.go
new file mode 100644
index 0000000000000000000000000000000000000000..2c040d25800f25b10515fb2d52961aaad6a5fcc0
--- /dev/null
+++ b/search/document_store.go
@@ -0,0 +1,329 @@
+package search
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+	"reflect"
+	"strings"
+	"time"
+
+	opensearchapi "github.com/opensearch-project/opensearch-go/opensearchapi"
+	"gitlab.com/uafrica/go-utils/errors"
+	"gitlab.com/uafrica/go-utils/logger"
+	"gitlab.com/uafrica/go-utils/logs"
+	"gitlab.com/uafrica/go-utils/reflection"
+)
+
+type DocumentStore interface {
+	Write(id string, data interface{}) error
+	Search(query Query, limit int64) (ids []string, totalCount int, err error)
+	Get(id string) (doc interface{}, err error)
+	Delete(id string) error
+}
+
+type documentStore struct {
+	w                      *writer
+	name                   string
+	dataType               reflect.Type
+	settings               Settings
+	mappings               Mappings
+	jsonSettings           []byte
+	jsonMappings           []byte
+	created                bool
+	searchResponseBodyType reflect.Type
+	getResponseBodyType    reflect.Type
+}
+
+//purpose:
+//	create a document store index to write e.g. orders then allow one to search them
+//parameters:
+//	name must be the complete openSearch index name e.g. "uafrica-v3-orders"
+//	tmpl must be your document data struct consisting of public fields as:
+//		Xxx string `json:"<name>" search:"keyword|text|long|date"`	(can later add more types)
+//		Xxx time.Time `json:"<name>"`								assumes type "date" for opensearch
+//		Xxx int `json:"<name>"`										assumes type "long" for opensearch, specify keyword if required
+func (w *writer) DocumentStore(name string, tmpl interface{}) (DocumentStore, error) {
+	if !indexNameRegex.MatchString(name) {
+		return nil, errors.Errorf("invalid index_name:\"%s\"", name)
+	}
+
+	//if already created, just return
+	if existingDocumentStore, ok := w.documentStoreByName[name]; ok {
+		return existingDocumentStore, nil
+	}
+
+	structType := reflect.TypeOf(tmpl)
+	if tmpl == nil || structType.Kind() != reflect.Struct {
+		return nil, errors.Errorf("%T is not a struct", tmpl)
+	}
+
+	ds := &documentStore{
+		w:        w,
+		name:     name,
+		dataType: structType,
+		created:  false,
+	}
+
+	//define the OpenSearch index mapping
+	ds.settings = Settings{
+		Index: &SettingsIndex{
+			NumberOfShards:   4,
+			NumberOfReplicas: 0,
+		},
+	}
+
+	if properties, err := structMappingProperties(structType); err != nil {
+		return nil, errors.Wrapf(err, "cannot map struct %s", structType)
+	} else {
+		ds.mappings = Mappings{
+			Properties: properties,
+		}
+	}
+
+	var err error
+	ds.jsonSettings, err = json.Marshal(ds.settings)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to marshal index settings")
+	}
+	ds.jsonMappings, err = json.Marshal(ds.mappings)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to marshal index mappings")
+	}
+	logger.Infof("%s Index Mappings: %s", structType, string(ds.jsonMappings))
+
+	//define search response type
+	//similar to SearchResponseBody
+	ds.searchResponseBodyType, err = reflection.CloneType(
+		reflect.TypeOf(SearchResponseBody{}),
+		map[string]reflect.Type{
+			".hits.hits[]._source": ds.dataType,
+		})
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to make search response type for document store")
+	}
+
+	//define get response type
+	//similar to GetResponseBody
+	ds.getResponseBodyType, err = reflection.CloneType(
+		reflect.TypeOf(GetResponseBody{}),
+		map[string]reflect.Type{
+			"._source": ds.dataType,
+		})
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to make get response type for document store")
+	}
+	w.documentStoreByName[name] = ds
+	return ds, nil
+}
+
+//data must be of type specified in Writer.TimeSeries(tmpl)
+func (ds *documentStore) Write(id string, data interface{}) error {
+	if data == nil {
+		return errors.Errorf("data:nil")
+	}
+	t := reflect.TypeOf(data)
+	if t != ds.dataType {
+		return errors.Errorf("cannot write %T into DocumentStore(%s), expecting %s", data, ds.name, ds.dataType)
+	}
+
+	//get daily search index to write to, from start time
+	indexName := ds.name // + "-" + startTime.Format("20060102")
+	if !ds.created { //NOTE(review): ds.created is read/written without synchronization - concurrent Write calls may race
+		res, err := ds.w.api.Create(
+			indexName, //index name
+			indexName, //document id
+			strings.NewReader(string(ds.jsonSettings)))
+		if err != nil {
+			return errors.Wrapf(err, "failed to create index(%s)", indexName)
+		}
+		switch res.StatusCode {
+		case http.StatusOK:
+		case http.StatusCreated:
+		case http.StatusConflict: //409 = already exists
+		default:
+			return errors.Errorf("failed to create index(%s): %v %s %s", indexName, res.StatusCode, res.Status(), res.String())
+		}
+
+		res, err = opensearchapi.IndicesPutMappingRequest{
+			Index: []string{indexName},
+			Body:  strings.NewReader(string(ds.jsonMappings)),
+		}.Do(context.Background(), ds.w.client)
+		if err != nil {
+			return errors.Wrapf(err, "failed to create index(%s)", indexName)
+		}
+		switch res.StatusCode {
+		case http.StatusOK:
+		case http.StatusCreated:
+		case http.StatusConflict: //409 = already exists
+		default:
+			return errors.Errorf("failed to create index(%s): %v %s %s", indexName, res.StatusCode, res.Status(), res.String())
+		}
+		ds.created = true
+	}
+	if res, err := ds.w.Write(indexName, id, data); err != nil {
+		return err
+	} else {
+		logger.Debugf("IndexResponse: %+v", res)
+	}
+	return nil
+}
+
+//Search
+//Return:
+//	docs will be a slice of the DocumentStore data type
+func (ds *documentStore) Search(query Query, limit int64) (ids []string, totalCount int, err error) {
+	if ds == nil {
+		return nil, 0, errors.Errorf("document store == nil")
+	}
+	if limit < 0 || limit > 1000 {
+		err = errors.Errorf("limit=%d not 0..1000", limit)
+		return
+	}
+
+	// example search request body for free text
+	// 	{
+	// 		"size": 5,
+	// 		"query": {
+	// 			"multi_match": {
+	// 				"query": "miller",
+	// 				"fields": ["title^2", "director"]
+	// 			}
+	//    	}
+	//  }
+	body := SearchRequestBody{
+		Size:  limit,
+		Query: query,
+	}
+
+	jsonBody, _ := json.Marshal(body)
+	search := opensearchapi.SearchRequest{
+		Index: []string{ds.name},
+		Body:  bytes.NewReader(jsonBody),
+	}
+
+	startTime := time.Now()
+	defer func() {
+		logs.LogSearch(startTime, ds.name, string(jsonBody))
+	}()
+
+	searchResponse, err := search.Do(context.Background(), ds.w.client)
+	if err != nil {
+		err = errors.Wrapf(err, "failed to search documents")
+		return
+	}
+	//TODO: defer searchResponse.Body.Close() here to avoid leaking the HTTP connection
+	switch searchResponse.StatusCode {
+	case http.StatusOK:
+	default:
+		resBody, _ := ioutil.ReadAll(searchResponse.Body)
+		err = errors.Errorf("Search failed with HTTP status %v: %s", searchResponse.StatusCode, string(resBody))
+		return
+	}
+
+	bodyData, _ := ioutil.ReadAll(searchResponse.Body)
+	logger.Debugf("Response Body: %s", string(bodyData))
+
+	resBodyPtrValue := reflect.New(ds.searchResponseBodyType)
+	// if err = json.NewDecoder(searchResponse.Body).Decode(resBodyPtrValue.Interface()); err != nil {
+	if err = json.Unmarshal(bodyData, resBodyPtrValue.Interface()); err != nil {
+		logger.Errorf("search response body: %s", string(bodyData))
+		err = errors.Wrapf(err, "cannot decode search response body")
+		return
+	}
+
+	logger.Debugf("Response Parsed: %+v", resBodyPtrValue.Interface())
+
+	hitsTotalValue, err := reflection.Get(resBodyPtrValue, ".hits.total.value")
+	if err != nil {
+		err = errors.Wrapf(err, "cannot get total nr of hits")
+		return
+	}
+	if hitsTotalValue.Interface().(int) < 1 {
+		return nil, 0, nil //no matches
+	}
+
+	foundIDs, err := reflection.Get(resBodyPtrValue, ".hits.hits[]._id")
+	if err != nil {
+		err = errors.Wrapf(err, "cannot get search response documents")
+		return
+	}
+	logger.Debugf("items: (%T) %+v", foundIDs.Interface(), foundIDs.Interface()) //debug-level: routine trace, not an error
+	return foundIDs.Interface().([]string), hitsTotalValue.Interface().(int), nil
+}
+
+func (ds *documentStore) Get(id string) (doc interface{}, err error) {
+	if ds == nil {
+		return nil, errors.Errorf("document store == nil")
+	}
+	get := opensearchapi.GetRequest{
+		Index:        ds.name,
+		DocumentType: "_doc",
+		DocumentID:   id,
+	}
+	getResponse, err := get.Do(context.Background(), ds.w.client)
+	if err != nil {
+		err = errors.Wrapf(err, "failed to get document")
+		return
+	}
+
+	switch getResponse.StatusCode {
+	case http.StatusOK:
+	default:
+		resBody, _ := ioutil.ReadAll(getResponse.Body)
+		err = errors.Errorf("Get failed with HTTP status %v: %s", getResponse.StatusCode, string(resBody))
+		return
+	}
+
+	resBodyPtrValue := reflect.New(ds.getResponseBodyType)
+	if err = json.NewDecoder(getResponse.Body).Decode(resBodyPtrValue.Interface()); err != nil {
+		err = errors.Wrapf(err, "cannot decode get response body")
+		return
+	}
+
+	foundVar, err := reflection.Get(resBodyPtrValue, ".found")
+	if err != nil {
+		err = errors.Wrapf(err, "cannot get found value")
+		return
+	}
+	if found, ok := foundVar.Interface().(bool); !ok || !found {
+		return nil, nil //not found
+	}
+
+	//found
+	source, err := reflection.Get(resBodyPtrValue, "._source")
+	if err != nil {
+		err = errors.Wrapf(err, "cannot get document from get response")
+		return
+	}
+	return source.Interface(), nil
+}
+
+func (ds *documentStore) Delete(id string) (err error) {
+	if ds == nil {
+		return errors.Errorf("document store == nil")
+	}
+	del := opensearchapi.DeleteRequest{
+		Index:        ds.name,
+		DocumentType: "_doc",
+		DocumentID:   id,
+	}
+	delResponse, err := del.Do(context.Background(), ds.w.client)
+	if err != nil {
+		err = errors.Wrapf(err, "failed to del document")
+		return
+	}
+
+	switch delResponse.StatusCode {
+	case http.StatusOK:
+	case http.StatusNotFound:
+	case http.StatusNoContent:
+	default:
+		resBody, _ := ioutil.ReadAll(delResponse.Body)
+		err = errors.Errorf("Del failed with HTTP status %v: %s", delResponse.StatusCode, string(resBody))
+		return
+	}
+	return nil
+}
diff --git a/search/document_store_test.go b/search/document_store_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..2c168d1e37f20c761709d9d74730083135308ef4
--- /dev/null
+++ b/search/document_store_test.go
@@ -0,0 +1,223 @@
+package search_test
+
+import (
+	"fmt"
+	"math/rand"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/google/uuid"
+	"gitlab.com/uafrica/go-utils/logger"
+	"gitlab.com/uafrica/go-utils/search"
+)
+
+func TestLocalDocuments(t *testing.T) {
+	testDocuments(t, search.Config{
+		Addresses: []string{"https://localhost:9200"},
+	})
+}
+
+func TestDevDocuments(t *testing.T) {
+	testDocuments(t, search.Config{
+		Addresses: []string{"https://search-uafrica-v3-api-logs-fefgiypvmb3sg5wqohgsbqnzvq.af-south-1.es.amazonaws.com/"}, //from AWS Console OpenSearch Service > Domains > uafrica-v3-api-logs > General Information: Domain Endpoints
+		Username:  "uafrica",
+		Password:  "Aiz}a4ee", //FIXME: hard-coded credentials committed to source control - load from environment/secrets instead
+	})
+}
+
+func testDocuments(t *testing.T, c search.Config) {
+	logger.SetGlobalFormat(logger.NewConsole())
+	logger.SetGlobalLevel(logger.LevelDebug)
+	a, err := search.New(c)
+	if err != nil {
+		t.Fatalf("failed to create writer: %+v", err)
+	}
+
+	indexName := "go-utils-search-docs-test"
+	ds, err := a.DocumentStore(indexName, SearchOrder{})
+	if err != nil {
+		t.Fatalf("failed to create document store: %+v", err)
+	}
+
+	//write N documents
+	buyers := []string{"Joe", "Anne", "Griet", "Kobus", "Caleb", "Roger", "Susan", "Maria", "Sandra"}
+	sellers := []string{"Hannelie", "Angelica", "Louis", "Bertus", "Bongi", "Vusi", "Andrew", "Joseph"}
+	nextItem := 0
+	itemInfos := []testItemInfo{
+		{Name: "Samsung 17\" LCD Monitor", Cost: 0},
+		{Name: "Acer 15\" LED Monitor", Cost: 0},
+		{Name: "Apple M1 16\" MAC", Cost: 0},
+		{Name: "Red Dress size M", Cost: 0},
+		{Name: "Grey Shorts size 115cm", Cost: 0},
+		{Name: "Black Student Prince School Shoes Boys Size 8", Cost: 0},
+		{Name: "Black Student Prince School Shoes Boys Size 9", Cost: 0},
+		{Name: "Black Student Prince School Shoes Boys Size 10", Cost: 0},
+		{Name: "Black Student Prince School Shoes Boys Size 11", Cost: 0},
+		{Name: "Black Student Prince School Shoes Girst Size 6", Cost: 0},
+		{Name: "Black Student Prince School Shoes Girst Size 7", Cost: 0},
+		{Name: "Black Student Prince School Shoes Girst Size 8", Cost: 0},
+		{Name: "Faber Castell HB Pencil", Cost: 0},
+		{Name: "Faber Castell 2H Pencil", Cost: 0},
+		{Name: "Faber Castell 4H Pencil", Cost: 0},
+		{Name: "12 Colour Crayons", Cost: 0},
+		{Name: "Steadler Rubber", Cost: 0},
+	}
+	N := 100
+	for i := 0; i < N; i++ {
+		//make a random document
+		id := uuid.New().String()
+		buyer := buyers[rand.Intn(len(buyers))]
+		seller := sellers[rand.Intn(len(sellers))]
+		doc := SearchOrder{
+			ID:                            int64(i + 1),
+			AccountID:                     int64(i + 2),
+			AccountOrderNumber:            int64(i * 2),
+			ChannelID:                     int64(i),
+			ChannelOrderNumber:            fmt.Sprintf("CHO-%d", i),
+			ChannelOrderReference:         fmt.Sprintf("REF-%05d", i),
+			CustomerName:                  buyer,
+			CustomerEmail:                 strings.ToLower(buyer + "@home.net"),
+			CustomerPhone:                 "123456789",
+			DeliveryAddress:               "My Street",
+			Currency:                      "ZAR",
+			Items:                         []SearchOrderItem{},
+			TotalPrice:                    0,
+			TotalWeightKg:                 0,
+			TotalQty:                      0,
+			TotalFulfilledQty:             0,
+			Status:                        OrderStatusNew,
+			PaymentStatus:                 OrderPaymentStatusUnpaid,
+			TimeCreated:                   time.Now(),
+			TimeModified:                  nil,
+			Tags:                          "1(blue),2(red)",
+			BuyerSelectedShippingCost:     1.23,
+			BuyerSelectedShippingProvider: seller,
+		}
+		for i := 0; i < rand.Intn(5)+1; i++ {
+			itemInfo := itemInfos[nextItem]
+			nextItem++
+			if nextItem >= len(itemInfos) {
+				nextItem = 0
+			}
+			item := SearchOrderItem{
+				SKU:         fmt.Sprintf("SKU-%s-%03d", itemInfo.Name[:3], i),
+				Description: itemInfo.Name,
+				UnitPrice:   itemInfo.Cost,
+				Qty:         rand.Intn(3) + 1,
+			}
+			doc.Items = append(doc.Items, item)
+			doc.TotalPrice += item.UnitPrice * float64(item.Qty)
+			doc.TotalQty += item.Qty
+		}
+
+		//add to the search
+		if err := ds.Write(id, doc); err != nil {
+			t.Fatalf("failed to add doc: %+v", err)
+		}
+	}
+
+	//search some of the documents
+	query := search.Query{
+		MultiMatch: &search.QueryMultiMatch{
+			Query:  buyers[0],
+			Fields: []string{"buyer"},
+		},
+	}
+	ids, totalCount, err := ds.Search(query, 10)
+	if err != nil {
+		t.Errorf("failed to search: %+v", err)
+	} else {
+		t.Logf("search result total_count:%d with %d ids", totalCount, len(ids))
+		if len(ids) > 10 {
+			t.Errorf("got %d docs > max 10", len(ids))
+		}
+		for _, id := range ids {
+			if getDoc, err := ds.Get(id); err != nil {
+				t.Fatalf("failed to get: %+v", err)
+			} else {
+				t.Logf("   GOT: %s: %+v", id, getDoc)
+			}
+		}
+	}
+	t.Logf("Done")
+}
+
+// type testDocument struct {
+// 	//UUID       string    `json:"uuid"`
+// 	Buyer      string    `json:"buyer" search:"keyword"`
+// 	Seller     string    `json:"seller" search:"keyword"`
+// 	Items      []docItem `json:"items"`
+// 	TotalCost  float64   `json:"total_cost"`
+// 	TotalItems int       `json:"total_items"`
+// }
+
+// type docItem struct {
+// 	Description string  `json:"description" search:"keyword"`
+// 	UnitCost    float64 `json:"unit_cost"`
+// 	Qty         int     `json:"qty"`
+// }
+
+type testItemInfo struct {
+	Name string
+	Cost float64
+}
+
+type SearchOrder struct {
+	ID                            int64              `json:"id"`
+	AccountID                     int64              `json:"account_id"                search:"keyword"`
+	AccountOrderNumber            int64              `json:"account_order_number"`
+	ChannelID                     int64              `json:"channel,omitempty"         search:"keyword"`
+	ChannelOrderNumber            string             `json:"channel_order_number,omitempty"`
+	ChannelOrderReference         string             `json:"channel_order_reference,omitempty"`
+	CustomerName                  string             `json:"customer_name,omitempty"`
+	CustomerEmail                 string             `json:"customer_email,omitempty"`
+	CustomerPhone                 string             `json:"customer_phone,omitempty"`
+	DeliveryAddress               string             `json:"delivery_address,omitempty"`
+	Currency                      string             `json:"currency"                  search:"keyword"`
+	Items                         []SearchOrderItem  `json:"items,omitempty"`
+	TotalPrice                    float64            `json:"total_price"`
+	TotalWeightKg                 float64            `json:"total_weight_kg"`
+	TotalQty                      int                `json:"total_qty"`
+	TotalFulfilledQty             int                `json:"total_fulfilled_qty"`
+	Status                        OrderStatus        `json:"status"                    search:"keyword"`
+	PaymentStatus                 OrderPaymentStatus `json:"payment_status"            search:"keyword"`
+	TimeCreated                   time.Time          `json:"time_created"`
+	TimeModified                  *time.Time         `json:"time_modified,omitempty"`
+	Tags                          string             `json:"tags"                      search:"keyword"` //CSV or tags sorted, so [A,B] and [B,A] both -> "A,B" keyword
+	BuyerSelectedShippingCost     float64            `json:"buyer_selected_shipping_cost,omitempty"`
+	BuyerSelectedShippingProvider string             `json:"buyer_selected_shipping_provider,omitempty"`
+}
+
+type SearchOrderItem struct {
+	SKU           string  `json:"sku"                          search:"keyword"`
+	Description   string  `json:"description"                  search:"keyword"`
+	Vendor        string  `json:"vendor"                       search:"keyword"`
+	UnitPrice     float64 `json:"unit_price"`
+	UnitWeightKg  float64 `json:"unit_weight_kg"`
+	Qty           int     `json:"qty"`
+	FulfilledQty  int     `json:"fulfilled_qty"`
+	TotalPrice    float64 `json:"total_price"`
+	TotalWeightKg float64 `json:"total_weight_kg"`
+}
+
+type OrderStatus string
+
+const (
+	OrderStatusNew       OrderStatus = "new"
+	OrderStatusCompleted OrderStatus = "completed"
+	OrderStatusCancelled OrderStatus = "cancelled"
+)
+
+type OrderPaymentStatus string
+
+const (
+	OrderPaymentStatusUnpaid            OrderPaymentStatus = "unpaid"
+	OrderPaymentStatusPending           OrderPaymentStatus = "pending"
+	OrderPaymentStatusPartiallyPaid     OrderPaymentStatus = "partially-paid"
+	OrderPaymentStatusPaid              OrderPaymentStatus = "paid"
+	OrderPaymentStatusPartiallyRefunded OrderPaymentStatus = "partially-refunded"
+	OrderPaymentStatusRefunded          OrderPaymentStatus = "refunded"
+	OrderPaymentStatusVoided            OrderPaymentStatus = "voided"
+	OrderPaymentStatusAuthorised        OrderPaymentStatus = "authorised"
+)
diff --git a/search/opensearch_types.go b/search/opensearch_types.go
index 72c048ad80c0655e974af0af735b2cba37f332c6..cdbe211102bf8b240f12598b0f84df2588bae1c6 100644
--- a/search/opensearch_types.go
+++ b/search/opensearch_types.go
@@ -3,11 +3,6 @@ package search
 import "time"
 
 //Mapping configures an index in OpenSearch
-type Index struct {
-	Settings Settings `json:"settings"`
-	Mappings Mappings `json:"mappings"`
-}
-
 type Settings struct {
 	Index *SettingsIndex `json:"index,omitempty"`
 }
@@ -22,7 +17,7 @@ type Mappings struct {
 }
 
 type MappingProperty struct {
-	Type       string                            `json:"type"`
+	Type       string                            `json:"type,omitempty"` //empty for sub-structs described with properties
 	Enabled    bool                              `json:"enabled,omitempty"`
 	Fields     map[string]MappingFieldProperties `json:"fields,omitempty"`
 	Properties map[string]MappingProperty        `json:"properties,omitempty"`
@@ -45,18 +40,23 @@ type SearchRequestBody struct {
 
 type Query struct {
 	//one of:
-	Match      *QueryNameValue  `json:"match,omitempty" doc:"<field>:<value>"`
-	Term       *QueryNameValue  `json:"term,omitempty"`
-	Range      *QueryRange      `json:"range,omitempty"`
-	MultiMatch *QueryMultiMatch `json:"multi_match,omitempty"`
-	Bool       *QueryBool       `json:"bool,omitempty"`
+	Match       *QueryNameValue  `json:"match,omitempty" doc:"<field>:<value>"`
+	Term        *QueryNameValue  `json:"term,omitempty"`
+	Range       *QueryRange      `json:"range,omitempty"`
+	MultiMatch  *QueryMultiMatch `json:"multi_match,omitempty"`
+	Bool        *QueryBool       `json:"bool,omitempty"`
+	QueryString *QueryString     `json:"query_string,omitempty"`
 }
 
 type QueryMultiMatch struct {
-	Query  string   `json:"query" doc:"Text search in below fields"`
+	Query  string   `json:"query" doc:"Full value match search in selected fields"`
 	Fields []string `json:"fields,omitempty" doc:"List of fields"`
 }
 
+type QueryString struct {
+	Query string `json:"query" doc:"Text search with partial matches, using asterisk for a multi-character wildcard or question mark for a single-character wildcard before and/or after text"`
+}
+
 //https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-bool-query.html
 type QueryBool struct {
 	Must    []Query `json:"must,omitempty" docs:"List of things that must appear in matching documents and will contribute to the score."`
@@ -160,3 +160,25 @@ type HitDoc struct {
 	Score  float64                `json:"_score"`  //
 	Source map[string]interface{} `json:"_source"` //the document of itemType
 }
+
+//Get Response Body Example:
+// {
+// 	"_index": "go-utils-search-docs-test",
+// 	"_type": "_doc",
+// 	"_id": "836c6443-5b0e-489b-aa0f-712ebed96841",
+// 	"_version": 1,
+// 	"_seq_no": 6,
+// 	"_primary_term": 1,
+// 	"found": true,
+// 	"_source": { ... }
+//  }
+type GetResponseBody struct {
+	Index       string                 `json:"_index"` //name of index
+	Type        string                 `json:"_type"`  //_doc
+	ID          string                 `json:"_id"`
+	Version     int                    `json:"_version"`
+	SeqNo       int                    `json:"_seq_no"`
+	PrimaryTerm int                    `json:"_primary_term"`
+	Found       bool                   `json:"found"`
+	Source      map[string]interface{} `json:"_source"` //the document of itemType
+}
diff --git a/search/search_test.go b/search/search_test.go
index 0deef6b0234d07bbf1c1fa2274004b3ced4a47d3..8a03042b26c6f4cc9b6a3d4c94f5c9e8e019cd8b 100644
--- a/search/search_test.go
+++ b/search/search_test.go
@@ -107,7 +107,7 @@ type testStruct struct {
 	Test1      string   `json:"test1"`
 	Test2      string   `json:"test2"`
 	Test3      int      `json:"test3"`
-	HTTP       httpData `json:"http"`
+	HTTP       httpData `json:"http"` //this is a sub-struct...
 	HTTPMethod string   `json:"http_method" search:"keyword"`
 	HTTPPath   string   `json:"http_path" search:"keyword"`
 }
@@ -115,6 +115,7 @@ type testStruct struct {
 type httpData struct {
 	Method string `json:"method" search:"keyword"`
 	Path   string `json:"path" search:"keyword"`
+	Size   int    `json:"size" search:"long"`
 }
 
 func TestOlderThan(t *testing.T) {
@@ -133,3 +134,7 @@ func TestOlderThan(t *testing.T) {
 	t1 = t1.Add(-time.Hour * 24 * time.Duration(olderThanDays))
 	t.Logf("Threshold = %s", t1)
 }
+
+func TestTime(t *testing.T) {
+	t.Logf("Time: %s", time.Now().Format("2006-01-02T15:04:05Z07:00"))
+}
diff --git a/search/time_series.go b/search/time_series.go
index 3a13720a1706ca99d1fcb3d93176804ced4bcc84..0fddffae5ef8db808f628fb0b563658060556f4a 100644
--- a/search/time_series.go
+++ b/search/time_series.go
@@ -13,10 +13,11 @@ import (
 	opensearchapi "github.com/opensearch-project/opensearch-go/opensearchapi"
 	"gitlab.com/uafrica/go-utils/errors"
 	"gitlab.com/uafrica/go-utils/logger"
+	"gitlab.com/uafrica/go-utils/logs"
 	"gitlab.com/uafrica/go-utils/reflection"
 )
 
-const TimeFormat = "2006-01-02T15:04:05+07:00"
+const TimeFormat = "2006-01-02T15:04:05Z07:00"
 
 //embed this into your log struct
 type TimeSeriesHeader struct {
@@ -31,22 +32,18 @@ type TimeSeries interface {
 }
 
 type timeSeries struct {
-	w             *writer
-	name          string
-	dataType      reflect.Type
-	fields        []dataField
-	jsonIndexSpec []byte
-	createdDates  map[string]bool
+	w            *writer
+	name         string
+	dataType     reflect.Type
+	settings     Settings
+	mappings     Mappings
+	jsonSettings []byte
+	jsonMappings []byte
+	createdDates map[string]bool
 
 	searchResponseBodyType reflect.Type
 }
 
-type dataField struct {
-	name    string
-	index   []int
-	mapping MappingProperty
-}
-
 //purpose:
 //	create a time series to write e.g. api logs
 //parameters:
@@ -78,52 +75,118 @@ func (w *writer) TimeSeries(name string, tmpl interface{}) (TimeSeries, error) {
 		w:            w,
 		name:         name,
 		dataType:     structType,
-		fields:       []dataField{},
 		createdDates: map[string]bool{},
 	}
 
 	//define the OpenSearch index mapping
-	indexSpec := Index{
-		Settings: Settings{
-			Index: &SettingsIndex{
-				NumberOfShards:   4,
-				NumberOfReplicas: 0,
-			},
-		},
-		Mappings: Mappings{
-			Properties: map[string]MappingProperty{},
+	ts.settings = Settings{
+		Index: &SettingsIndex{
+			NumberOfShards:   4,
+			NumberOfReplicas: 0,
 		},
 	}
+
+	if properties, err := structMappingProperties(structType); err != nil {
+		return nil, errors.Wrapf(err, "cannot map struct %s", structType)
+	} else {
+		ts.mappings = Mappings{
+			Properties: properties,
+		}
+	}
+
+	var err error
+	ts.jsonSettings, err = json.Marshal(ts.settings)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to marshal index settings")
+	}
+	ts.jsonMappings, err = json.Marshal(ts.mappings)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to marshal index mappings")
+	}
+	logger.Infof("%s Index Mappings: %s", structType, string(ts.jsonMappings))
+
+	//define search response type
+	//similar to SearchResponseBody
+	ts.searchResponseBodyType, err = reflection.CloneType(
+		reflect.TypeOf(SearchResponseBody{}),
+		map[string]reflect.Type{
+			".hits.hits[]._source": ts.dataType,
+		})
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to make search response type for time-series")
+	}
+	w.timeSeriesByName[name] = ts
+	return ts, nil
+}
+
+func structMappingProperties(structType reflect.Type) (map[string]MappingProperty, error) {
+	properties := map[string]MappingProperty{}
 	for i := 0; i < structType.NumField(); i++ {
 		structField := structType.Field(i)
-		dataField := dataField{
-			name:    structField.Name,
-			index:   structField.Index,
-			mapping: MappingProperty{Type: "text"},
+
+		fieldName := structField.Name
+
+		//fields of embedded (anonymous) structs are added at the same level
+		if structField.Anonymous && structField.Type.Kind() == reflect.Struct {
+			subFields, err := structMappingProperties(structField.Type)
+			if err != nil {
+				return nil, errors.Wrapf(err, "failed to map embedded struct %s", fieldName)
+			}
+			for n, v := range subFields {
+				properties[n] = v
+			}
+			continue
 		}
+
 		if jsonTags := strings.SplitN(structField.Tag.Get("json"), ",", 2); len(jsonTags) > 0 && jsonTags[0] != "" {
-			dataField.name = jsonTags[0]
+			fieldName = jsonTags[0]
 		}
-		if dataField.name == "" {
-			logger.Debugf("Skip %s unnamed field %+v", structType.Name(), structField)
+		if fieldName == "" {
+			logger.Debugf("Skip %s unnamed field %+v", structType, structField)
 			continue
 		}
 
 		//get default type of search value from field type
+		fieldMapping := MappingProperty{Type: "text"}
 		switch structField.Type.Kind() {
 		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-			dataField.mapping = MappingProperty{Type: "long"}
+			fieldMapping = MappingProperty{Type: "long"}
 		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-			dataField.mapping = MappingProperty{Type: "long"}
+			fieldMapping = MappingProperty{Type: "long"}
+		case reflect.Float32, reflect.Float64:
+			fieldMapping = MappingProperty{Type: "float"}
 		case reflect.Bool:
-			dataField.mapping = MappingProperty{Type: "boolean"}
+			fieldMapping = MappingProperty{Type: "boolean"}
 		case reflect.String:
-			dataField.mapping = MappingProperty{Type: "text"}
+			fieldMapping = MappingProperty{Type: "text"}
+
+		case reflect.Slice:
+			//do not indicate slice, just map slice items as sub-items
+			subStructProperties, err := structMappingProperties(structField.Type.Elem())
+			if err != nil {
+				return nil, errors.Wrapf(err, "failed to map %s.%s", structType, structField.Name)
+			}
+			fieldMapping = MappingProperty{
+				Properties: subStructProperties,
+			}
+
 		default:
 			if structField.Type == reflect.TypeOf(time.Now()) {
-				dataField.mapping = MappingProperty{Type: "date"}
+				fieldMapping = MappingProperty{Type: "date"}
 			} else {
-				dataField.mapping = MappingProperty{Type: "text"}
+				if structField.Type.Kind() == reflect.Struct {
+					subStructProperties, err := structMappingProperties(structField.Type)
+					if err != nil {
+						return nil, errors.Wrapf(err, "failed to map %s.%s", structType, structField.Name)
+					}
+					fieldMapping = MappingProperty{
+						Properties: subStructProperties,
+					}
+				} else {
+					// fieldMapping = MappingProperty{Type: "text"}
+					// unknown value type... we do not specify mapping and let it use dynamic mapping
+					continue
+				}
 			}
 		}
 
@@ -132,56 +195,24 @@ func (w *writer) TimeSeries(name string, tmpl interface{}) (TimeSeries, error) {
 		case "":
 			//no change
 		case "keyword":
-			dataField.mapping = MappingProperty{Type: "keyword"}
+			fieldMapping.Type = "keyword"
 		case "long":
-			dataField.mapping = MappingProperty{Type: "long"}
+			fieldMapping.Type = "long"
 		case "date":
-			dataField.mapping = MappingProperty{Type: "date"}
+			fieldMapping.Type = "date"
 		case "boolean":
-			dataField.mapping = MappingProperty{Type: "boolean"}
+			fieldMapping.Type = "boolean"
 		case "object":
-			dataField.mapping = MappingProperty{Type: "boolean", Enabled: false}
+			fieldMapping.Type = "object"
+			fieldMapping.Enabled = false
 		default:
-			return nil, errors.Errorf("Unknown search:\"%s\" on timeSeries(%s) field(%s)", structField.Tag.Get("search"), name, structField.Name)
+			return nil, errors.Errorf("Unknown search:\"%s\" on field(%s)", structField.Tag.Get("search"), structField.Name)
 		}
 
 		//add to index spec
-		indexSpec.Mappings.Properties[dataField.name] = dataField.mapping
-
-		//add to list of fields
-		ts.fields = append(ts.fields, dataField)
+		properties[fieldName] = fieldMapping
 	}
-
-	//add header fields for all time series to the index spec
-	for n, p := range map[string]MappingProperty{
-		"@timestamp":   {Type: "date"},
-		"@end_time":    {Type: "date"},
-		"@duration_ms": {Type: "long"},
-	} {
-		indexSpec.Mappings.Properties[n] = p
-	}
-
-	var err error
-	ts.jsonIndexSpec, err = json.Marshal(indexSpec)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to marshal index spec")
-	}
-
-	//define search response type
-	//similar to SearchResponseBody
-	ts.searchResponseBodyType, err = reflection.CloneType(
-		reflect.TypeOf(SearchResponseBody{}),
-		map[string]reflect.Type{
-			".hits.hits[]._source": ts.dataType,
-		})
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to make search response type for time-series")
-	}
-
-	//new package: copy type recursively, find index of special field and replace when found....
-
-	w.timeSeriesByName[name] = ts
-	return ts, nil
+	return properties, nil
 }
 
 //data must be of type specified in Writer.TimeSeries(tmpl)
@@ -191,17 +222,33 @@ func (ts *timeSeries) Write(startTime, endTime time.Time, data interface{}) erro
 	}
 	t := reflect.TypeOf(data)
 	if t != ts.dataType {
-		return errors.Errorf("cannot write %T into TimeSeries(%s), expecting %s", data, ts.name, ts.dataType.Name())
+		return errors.Errorf("cannot write %T into TimeSeries(%s), expecting %s", data, ts.name, ts.dataType)
 	}
 
 	//get daily search index to write to, from start time
 	indexName := ts.name + "-" + startTime.Format("20060102")
 	if _, ok := ts.createdDates[indexName]; !ok {
 		//create new index for this date - if not exists
+
 		res, err := ts.w.api.Create(
 			indexName, //index name
 			indexName, //index name also used for document id
-			strings.NewReader(string(ts.jsonIndexSpec)))
+			strings.NewReader(string(ts.jsonSettings)))
+		if err != nil {
+			return errors.Wrapf(err, "failed to create index(%s)", indexName)
+		}
+		switch res.StatusCode {
+		case http.StatusOK:
+		case http.StatusCreated:
+		case http.StatusConflict: //409 = already exists
+		default:
+			return errors.Errorf("failed to create index(%s): %v %s %s", indexName, res.StatusCode, res.Status(), res.String())
+		}
+
+		res, err = opensearchapi.IndicesPutMappingRequest{
+			Index: []string{indexName},
+			Body:  strings.NewReader(string(ts.jsonMappings)),
+		}.Do(context.Background(), ts.w.client)
 		if err != nil {
 			return errors.Wrapf(err, "failed to create index(%s)", indexName)
 		}
@@ -221,9 +268,15 @@ func (ts *timeSeries) Write(startTime, endTime time.Time, data interface{}) erro
 	x.Elem().Field(0).Set(reflect.ValueOf(TimeSeriesHeader{
 		StartTime:  startTime,
 		EndTime:    endTime,
-		DurationMs: int64(endTime.Sub(startTime) / time.Millisecond),
+		DurationMs: endTime.Sub(startTime).Milliseconds(),
 	}))
-	return ts.w.Write(indexName, x.Elem().Interface())
+	if res, err := ts.w.Write(indexName, "", x.Elem().Interface()); err != nil {
+		return err
+	} else {
+		logger.Debugf("IndexResponse: %+v", res)
+	}
+	return nil
+
 }
 
 //parameters:
@@ -309,6 +362,9 @@ type IndexSettings struct {
 //Return:
 //	docs will be a slice of the TimeSeries data type
 func (ts *timeSeries) Search(query Query, limit int64) (docs interface{}, totalCount int, err error) {
+	if ts == nil {
+		return nil, 0, errors.Errorf("time series == nil")
+	}
 	if limit < 0 || limit > 1000 {
 		err = errors.Errorf("limit=%d not 0..1000", limit)
 		return
@@ -328,11 +384,19 @@ func (ts *timeSeries) Search(query Query, limit int64) (docs interface{}, totalC
 		Size:  limit,
 		Query: query,
 	}
+
 	jsonBody, _ := json.Marshal(body)
+	logger.Debugf("Search: %s", string(jsonBody))
 	search := opensearchapi.SearchRequest{
-		Body: bytes.NewReader(jsonBody),
+		Index: []string{ts.name + "-*"},
+		Body:  bytes.NewReader(jsonBody),
 	}
 
+	startTime := time.Now()
+	defer func() {
+		logs.LogSearch(startTime, ts.name+"-*", string(jsonBody))
+	}()
+
 	searchResponse, err := search.Do(context.Background(), ts.w.client)
 	if err != nil {
 		err = errors.Wrapf(err, "failed to search documents")
@@ -347,8 +411,13 @@ func (ts *timeSeries) Search(query Query, limit int64) (docs interface{}, totalC
 		return
 	}
 
+	bodyData, _ := ioutil.ReadAll(searchResponse.Body)
+	logger.Debugf("Response Body: %s", string(bodyData))
+
 	resBodyPtrValue := reflect.New(ts.searchResponseBodyType)
-	if err = json.NewDecoder(searchResponse.Body).Decode(resBodyPtrValue.Interface()); err != nil {
+	//if err = json.NewDecoder(searchResponse.Body).Decode(resBodyPtrValue.Interface()); err != nil {
+	if err = json.Unmarshal(bodyData, resBodyPtrValue.Interface()); err != nil {
+		logger.Errorf("search response body: %s", string(bodyData))
 		err = errors.Wrapf(err, "cannot decode search response body")
 		return
 	}
diff --git a/search/writer.go b/search/writer.go
index 8b492beb3d37445ef5c12010aad75e9c54471484..17a6dd0e0d316cd029d33f091ad735d6819af9ec 100644
--- a/search/writer.go
+++ b/search/writer.go
@@ -1,7 +1,6 @@
 package search
 
 import (
-	"context"
 	"crypto/tls"
 	"encoding/json"
 	"net/http"
@@ -10,11 +9,13 @@ import (
 	opensearch "github.com/opensearch-project/opensearch-go"
 	opensearchapi "github.com/opensearch-project/opensearch-go/opensearchapi"
 	"gitlab.com/uafrica/go-utils/errors"
+	"gitlab.com/uafrica/go-utils/logger"
 )
 
 type Writer interface {
 	TimeSeries(name string, tmpl interface{}) (TimeSeries, error) //tmpl must embed TimeSeriesHeader as first unanymous field
 	DelOldTimeSeries(name string, olderThanDays int) ([]string, error)
+	DocumentStore(name string, tmpl interface{}) (DocumentStore, error)
 }
 
 func New(config Config) (Writer, error) {
@@ -22,25 +23,27 @@ func New(config Config) (Writer, error) {
 		return nil, errors.Wrapf(err, "invalid config")
 	}
 	w := &writer{
-		config:           config,
-		timeSeriesByName: map[string]TimeSeries{},
+		config:              config,
+		timeSeriesByName:    map[string]TimeSeries{},
+		documentStoreByName: map[string]DocumentStore{},
 	}
 
-	// Initialize the client with SSL/TLS enabled.
-	var err error
-	w.client, err = opensearch.NewClient(opensearch.Config{
+	searchConfig := opensearch.Config{
 		Transport: &http.Transport{
 			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
 		},
 		Addresses: config.Addresses,
 		Username:  config.Username,
 		Password:  config.Password,
-	})
+	}
+	// Initialize the client with SSL/TLS enabled.
+	var err error
+	w.client, err = opensearch.NewClient(searchConfig)
 	if err != nil {
 		return nil, errors.Wrapf(err, "cannot initialize opensearch connection")
 	}
 	// Print OpenSearch version information on console.
-	//fmt.Println(client.Info())
+	logger.Debugf("Search client created with config: %+v", searchConfig)
 
 	w.api = opensearchapi.New(w.client)
 	return w, nil
@@ -48,30 +51,40 @@ func New(config Config) (Writer, error) {
 
 //implements audit.Auditor
 type writer struct {
-	config           Config
-	client           *opensearch.Client
-	api              *opensearchapi.API
-	timeSeriesByName map[string]TimeSeries
+	config              Config
+	client              *opensearch.Client
+	api                 *opensearchapi.API
+	timeSeriesByName    map[string]TimeSeries
+	documentStoreByName map[string]DocumentStore
 }
 
-func (writer writer) Write(indexName string, doc interface{}) error {
+func (writer writer) Write(indexName string, id string, doc interface{}) (*IndexResponse, error) {
 	if writer.client == nil {
-		return errors.Errorf("writer closed")
+		return nil, errors.Errorf("writer closed")
 	}
-	jsonDoc, err := json.Marshal(doc)
-	if err != nil {
-		return errors.Wrapf(err, "failed to JSON encode document")
+	jsonDocStr, ok := doc.(string)
+	if !ok {
+		jsonDoc, err := json.Marshal(doc)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to JSON encode document")
+		}
+		jsonDocStr = string(jsonDoc)
+	}
+	options := []func(*opensearchapi.IndexRequest){}
+	if id != "" {
+		options = append(options, writer.api.Index.WithDocumentID(id))
 	}
 	indexResponse, err := writer.api.Index(
 		indexName,
-		strings.NewReader(string(jsonDoc)),
+		strings.NewReader(jsonDocStr),
+		options...,
 	)
 	if err != nil {
-		return errors.Wrapf(err, "failed to index document")
+		return nil, errors.Wrapf(err, "failed to index document")
 	}
 	var res IndexResponse
 	if err := json.NewDecoder(indexResponse.Body).Decode(&res); err != nil {
-		return errors.Wrapf(err, "failed to decode JSON response")
+		return nil, errors.Wrapf(err, "failed to decode JSON response")
 	}
 	//success example:
 	//res = map[
@@ -95,39 +108,9 @@ func (writer writer) Write(indexName string, doc interface{}) error {
 	// 	status:400
 	//]
 	if res.Error != nil {
-		return errors.Errorf("failed to insert: %v", res.Error.Reason)
-	}
-	return nil
-}
-
-func (writer writer) Search() ([]interface{}, error) {
-	if writer.client == nil {
-		return nil, errors.Errorf("writer closed")
-	}
-	// Search for the document.
-	content := strings.NewReader(`{
-       "size": 5,
-       "query": {
-           "multi_match": {
-           "query": "miller",
-           "fields": ["title^2", "director"]
-           }
-      }
-    }`)
-
-	search := opensearchapi.SearchRequest{
-		Body: content,
-	}
-
-	searchResponse, err := search.Do(context.Background(), writer.client)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to search document")
-	}
-	var res interface{}
-	if err := json.NewDecoder(searchResponse.Body).Decode(&res); err != nil {
-		return nil, errors.Wrapf(err, "failed to decode JSON body")
+		return nil, errors.Errorf("failed to insert: %v", res.Error.Reason)
 	}
-	return nil, errors.Errorf("NYI search result processing: %v", res)
+	return &res, nil
 }
 
 type CreateResponse struct {
diff --git a/service/README.md b/service/README.md
index ce8c48cd7ee32ccf67df1508fc992c7af2efa312..6ee5841321774968c539d0bb4794d66da69259e8 100644
--- a/service/README.md
+++ b/service/README.md
@@ -34,7 +34,7 @@ Example:
 
 Where package db then defines:
 
-    func Connector(dbName string) service.IStarter {
+    func Connector(dbName string) service.Starter {
         return &connector{
             dbName: dbName,
             dbConn: nil,
@@ -157,22 +157,17 @@ So:
         ```claim,ok := ctx.Get("claims").(MyClaimStruct)```
 * All fields in params or body structs matching claim names will be overwritten by the time a handler is called.
 
-# Audits
-
-Audit records are written with:
-*   ctx.AuditChange(), or
-*   ctx.AuditWrite()
+# Data Change Audits
 
+Data Change audit records are written with ctx.AuditChange()
 The AuditChange() method logs the changes between an original and new value.
-The AuditWrite() logs all the data given to it.
 
-A handler may write 0..N audit record, there is no check. In general, audits are written to capture changes, and when a handler changes multiple database records, they could all be audited.
 
 # Sending Async Events
 
 Events are sent for async processing with ```ctx.NewEvent()...Send()``` as in this example:
 
-	if _, err := service.NewEvent(ctx, "BILLING").
+	if _, err := ctx.NewEvent(ctx, "BILLING").
 		Type("provider-invoice").
 		RequestID(ctx.RequestID()).
         Delay(time.Second * 5).
diff --git a/service/context.go b/service/context.go
index ce4acc20d11e657db6d6a7209e271f326f7da06a..49595c4e41efb4a280844ba665998d145a298eaf 100644
--- a/service/context.go
+++ b/service/context.go
@@ -9,6 +9,8 @@ import (
 	"gitlab.com/uafrica/go-utils/audit"
 	"gitlab.com/uafrica/go-utils/errors"
 	"gitlab.com/uafrica/go-utils/logger"
+	"gitlab.com/uafrica/go-utils/logs"
+	"gitlab.com/uafrica/go-utils/queues"
 	"gitlab.com/uafrica/go-utils/string_utils"
 )
 
@@ -18,8 +20,7 @@ var Ctx Context
 type Context interface {
 	context.Context
 	logger.Logger
-	Producer
-	audit.Auditor
+	queues.Producer
 
 	RequestID() string
 	MillisecondsSinceStart() int64
@@ -51,8 +52,12 @@ type Context interface {
 	ValueOrDefault(name string, defaultValue interface{}) interface{}
 	Data() map[string]interface{}
 
-	//write an audit event
+	//write a data change audit event
 	AuditChange(eventType string, orgValue, newValue interface{})
+
+	//Sleep() does a time.Sleep and record it in log actions so that we can account for the time spent sleeping
+	//vs e.g. time waiting for outgoing API calls or db queries
+	Sleep(dur time.Duration)
 }
 
 //values: are added to context and logger
@@ -78,7 +83,6 @@ func (s service) NewContext(base context.Context, requestID string, values map[s
 		Context:   base,
 		Logger:    l,
 		Producer:  s.Producer,
-		Auditor:   s.Auditor,
 		startTime: time.Now(),
 		requestID: requestID,
 		data:      map[string]interface{}{},
@@ -99,15 +103,13 @@ func (s service) NewContext(base context.Context, requestID string, values map[s
 		Ctx.Debugf("Start(%s)=(%T)%+v", starterName, starterData, starterData)
 	}
 
-
 	return Ctx, nil
 }
 
 type serviceContext struct {
 	context.Context
 	logger.Logger
-	Producer
-	audit.Auditor
+	queues.Producer
 	startTime time.Time
 	requestID string
 	claim     map[string]interface{}
@@ -220,17 +222,22 @@ func (ctx *serviceContext) ValueOrDefault(name string, defaultValue interface{})
 
 func (ctx *serviceContext) AuditChange(eventType string, orgValue, newValue interface{}) {
 	username, _ := ctx.Claim()["username"].(string)
-	event, err := audit.NewEvent(
+	if err := audit.SaveDataChange(
+		ctx.requestID,
 		username, //use username as source (will default to "SYSTEM" if undefined)
 		eventType,
 		orgValue,
 		newValue,
-	)
-	if err != nil {
-		ctx.Errorf("failed to define audit event: %+v", err)
+	); err != nil {
+		ctx.Errorf("failed to save data change: %+v", err)
 		return
 	}
-	if err := ctx.Auditor.WriteEvent(ctx.requestID, event); err != nil {
-		ctx.Errorf("failed to audit change: %+v", err)
+}
+
+func (ctx *serviceContext) Sleep(dur time.Duration) {
+	if dur > 0 {
+		startTime := time.Now()
+		time.Sleep(dur)
+		logs.LogSleep(startTime)
 	}
 }
diff --git a/service/service.go b/service/service.go
index 0a80933a8f94edc92eaf69d8817a390c1b474f2c..3eda7b06bf3ba17c23c70bf2edff773b29c52148 100644
--- a/service/service.go
+++ b/service/service.go
@@ -4,19 +4,17 @@ import (
 	"context"
 	"os"
 
-	"gitlab.com/uafrica/go-utils/audit"
 	"gitlab.com/uafrica/go-utils/errors"
 	"gitlab.com/uafrica/go-utils/logger"
+	"gitlab.com/uafrica/go-utils/queues"
 	"gitlab.com/uafrica/go-utils/string_utils"
 )
 
 type Service interface {
 	logger.Logger
-	Producer
-	audit.Auditor
-	WithStarter(name string, starter IStarter) Service
-	WithProducer(producer Producer) Service
-	WithAuditor(auditor audit.Auditor) Service
+	queues.Producer
+	WithStarter(name string, starter Starter) Service
+	WithProducer(producer queues.Producer) Service
 	NewContext(base context.Context, requestID string, values map[string]interface{}) (Context, error)
 }
 
@@ -28,18 +26,16 @@ func New() Service {
 	return service{
 		Producer: nil,
 		Logger:   logger.New().WithFields(map[string]interface{}{"env": env}),
-		Auditor:  audit.None(),
 		env:      env,
-		starters: map[string]IStarter{},
+		starters: map[string]Starter{},
 	}
 }
 
 type service struct {
-	logger.Logger //for logging outside of context
-	Producer      //for sending async events
-	audit.Auditor
-	env      string
-	starters map[string]IStarter
+	logger.Logger   //for logging outside of context
+	queues.Producer //for sending async events
+	env             string
+	starters        map[string]Starter
 }
 
 func (s service) Env() string {
@@ -59,7 +55,7 @@ func (s service) Env() string {
 //you can implement one starter that does everything and return a struct or
 //implement one for your db, one for rate limit, one for ...
 //the name must be snake-case, e.g. "this_is_my_starter_name"
-func (s service) WithStarter(name string, starter IStarter) Service {
+func (s service) WithStarter(name string, starter Starter) Service {
 	if !string_utils.IsSnakeCase(name) {
 		panic(errors.Errorf("invalid starter name=\"%s\", expecting snake_case names only", name))
 	}
@@ -73,16 +69,9 @@ func (s service) WithStarter(name string, starter IStarter) Service {
 	return s
 }
 
-func (s service) WithProducer(producer Producer) Service {
+func (s service) WithProducer(producer queues.Producer) Service {
 	if producer != nil {
 		s.Producer = producer
 	}
 	return s
 }
-
-func (s service) WithAuditor(auditor audit.Auditor) Service {
-	if auditor != nil {
-		s.Auditor = auditor
-	}
-	return s
-}
diff --git a/service/start.go b/service/start.go
index c30c11733164a66a642b6d89f1ce1f264525e465..143a4e15f533bfec0f903b6e2b3344116a5df752 100644
--- a/service/start.go
+++ b/service/start.go
@@ -1,6 +1,6 @@
 package service
 
-type IStarter interface {
+type Starter interface {
 	//called at the start of api/cron/queues processing, before checks, e.g. to ensure we have db connection
 	//i.e. setup things that does not depend on the request/event details
 	//if you need the request details, you need to implement a check for each of the api, cron and/or queue as needed, not a Start() method.
diff --git a/string_utils/key_reader.go b/string_utils/key_reader.go
new file mode 100644
index 0000000000000000000000000000000000000000..5d2bf3052c4d0c2ba91a58a3553dba82e5f74c45
--- /dev/null
+++ b/string_utils/key_reader.go
@@ -0,0 +1,39 @@
+package string_utils
+
+import (
+	"os"
+	"strings"
+)
+
+//KeyReader is an interface to read string "<key>":"<value>" pairs
+//which is common to read from the environment
+//it is abstracted so the same interface can be implemented for
+//reading for example from REDIS and other sources
+type KeyReader interface {
+	Keys(prefix string) []string
+	GetString(key string) (value string, ok bool)
+}
+
+func EnvironmentKeyReader() KeyReader {
+	return envKeyReader{}
+}
+
+type envKeyReader struct{}
+
+func (envKeyReader) GetString(key string) (value string, ok bool) {
+	value = os.Getenv(key)
+	if value == "" {
+		return "", false
+	}
+	return value, true
+}
+
+func (envKeyReader) Keys(prefix string) []string {
+	keys := []string{}
+	for _, env := range os.Environ() {
+		if strings.HasPrefix(env, prefix) {
+			keys = append(keys, strings.SplitN(env, "=", 2)[0])
+		}
+	}
+	return keys
+}
diff --git a/struct_utils/json.go b/struct_utils/json.go
new file mode 100644
index 0000000000000000000000000000000000000000..ee887f5dcb882a7251a8c40b5c24cfb3f9a53619
--- /dev/null
+++ b/struct_utils/json.go
@@ -0,0 +1,20 @@
+package struct_utils
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// UnmarshalJSON performs a JSON unmarshalling, but on type mismatches it returns a more user friendly error.
+// Used exactly the same as json.Unmarshal
+func UnmarshalJSON(data []byte, val interface{}) error {
+	err := json.Unmarshal(data, &val)
+	if err != nil {
+		typeErr, ok := err.(*json.UnmarshalTypeError)
+		if ok {
+			return fmt.Errorf("invalid type received for field '%s': expected a value of type '%s', but received type '%s'", typeErr.Field, typeErr.Type.Name(), typeErr.Value)
+		}
+	}
+
+	return err
+}
diff --git a/struct_utils/named_values_to_struct.go b/struct_utils/named_values_to_struct.go
new file mode 100644
index 0000000000000000000000000000000000000000..81c58360fad45405036a43ae412fc36d84fd1486
--- /dev/null
+++ b/struct_utils/named_values_to_struct.go
@@ -0,0 +1,302 @@
+package struct_utils
+
+import (
+	"encoding/csv"
+	"encoding/json"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"gitlab.com/uafrica/go-utils/errors"
+	"gitlab.com/uafrica/go-utils/logger"
+	"gitlab.com/uafrica/go-utils/string_utils"
+)
+
//NamedValuesFromEnv makes a list of named values from the env for parsing into a struct
//
//Parameters:
//	prefix should be uppercase (by convention) env prefix like "MY_LIB_CONFIG", without trailing "_"
//
//Result:
//	named values that can be passed into UnmarshalNamedValues()
//
//All env starting with "<prefix>_" will be copied without "<prefix>_"
//(note: the returned names are lowercased, e.g. {"max_size":["6"]})
//Examples with prefix="MY_LIB_CONFIG":
// MY_LIB_CONFIG_MAX_SIZE="6"        -> {"MAX_SIZE":["6"]}			one value of "6"
// MY_LIB_CONFIG_NAMES   ="A,B,C"    -> {"NAMES":["A,B,C"]}         one value of "A,B,C"
// MY_LIB_CONFIG_NRS     ="1,2,3"    -> {"NRS":["1,2,3"]}           one value of "1,2,3" (all env values are string, later parsed into int based on struct field type)
// MY_LIB_CONFIG_CODES   ="[1,2,3]"" -> {"CODES":["1","2","3"]}     3 values of "1", "2" and "3" because of outer [...], env values are string
//
// MY_LIB_CONFIG_CODES_1=5
// MY_LIB_CONFIG_CODES_5=7
// MY_LIB_CONFIG_CODES_2=10          -> {"CODES":["5","10","7"]}	3 values ordered on suffixes "_1", "_5", "_2" moving 10 before 7
//
// MY_LIB_CONFIG_ADDRS=["55 Crescent, Town", "12 Big Street, City"] -> 2 values including commas because of quoted CSV
func NamedValuesFromEnv(prefix string) map[string][]string {
	return NamedValuesFromReader(prefix, string_utils.EnvironmentKeyReader())
}
+
+func NamedValuesFromReader(prefix string, reader string_utils.KeyReader) map[string][]string {
+	if reader == nil {
+		return nil
+	}
+	result := map[string][]string{}
+	prefix += "_"
+	for _, key := range reader.Keys(prefix) {
+		value, ok := reader.GetString(key)
+		key = key[len(prefix):]
+		if !ok {
+			logger.Debugf("Key(%s) undefined", key)
+			continue
+		}
+		logger.Debugf("key(%s)=\"%s\"", key, value)
+		result[strings.ToLower(key)] = []string{value}
+
+		//split only if valid CSV between [...]
+		if value[0] == '[' && value[len(value)-1] == ']' {
+			csvReader := csv.NewReader(strings.NewReader(value[1 : len(value)-1]))
+			csvValues, csvErr := csvReader.Read() //this automatically removes quotes around some/all CSV inside the [...]
+			if csvErr == nil {
+				result[strings.ToLower(key)] = csvValues
+			}
+		}
+	}
+
+	//merge multiple <name>_#=<value> into single lists called <name>
+	namesToDelete := []string{}
+	merged := map[string][]nrWithValues{}
+	for name, values := range result {
+		delimIndex := strings.LastIndex(name, "_")
+		if delimIndex > 0 {
+			nr := name[delimIndex+1:]
+			if i64, err := strconv.ParseInt(nr, 10, 64); err == nil {
+				nameWithoutNr := name[:delimIndex]
+				if _, ok := merged[nameWithoutNr]; !ok {
+					merged[nameWithoutNr] = []nrWithValues{}
+				}
+				merged[nameWithoutNr] = append(merged[nameWithoutNr], nrWithValues{nr: i64, values: values})
+				namesToDelete = append(namesToDelete, name)
+			}
+		}
+	}
+
+	//delete merged values
+	for _, name := range namesToDelete {
+		delete(result, name)
+	}
+
+	//sort and set the merged names with single list of values
+	for nameWithoutNr, nrsWithValues := range merged {
+		if values, ok := result[nameWithoutNr]; ok {
+			nrsWithValues = append(nrsWithValues, nrWithValues{nr: 0, values: values}) //if also defined without _#
+		}
+		sort.Slice(nrsWithValues, func(i, j int) bool { return nrsWithValues[i].nr < nrsWithValues[j].nr })
+
+		list := []string{}
+		for _, nrWithValues := range nrsWithValues {
+			list = append(list, nrWithValues.values...)
+		}
+		result[nameWithoutNr] = list
+	}
+	return result
+}
+
//nrWithValues pairs the numeric suffix of a "<name>_<nr>" key with that
//key's values, so NamedValuesFromReader can order merged list elements on nr.
type nrWithValues struct {
	nr     int64
	values []string
}
+
//NamedValuesFromURL converts query string params to named values that can be
//parsed into a struct with UnmarshalNamedValues().
//It supports both single/multi-value params, depending how you get them from
//your HTTP library
//    (e.g. AWS API Gateway Context returns both but default golang net/http returns only params)
//Values of the form "[...]" are split as quoted CSV into multiple values.
func NamedValuesFromURL(params map[string]string, multiValueParams map[string][]string) map[string][]string {
	result := map[string][]string{}
	for n, v := range params {
		result[n] = []string{v}
	}
	for n, mv := range multiValueParams {
		if list, ok := result[n]; !ok {
			result[n] = mv
		} else {
			//do not add duplicates - seems like AWS put same value in both single and multivalue params
			for _, v := range mv {
				found := false
				for _, existingValue := range list {
					if v == existingValue {
						found = true
						break //one match is enough
					}
				}
				if !found {
					list = append(list, v)
				}
			}
			result[n] = list
		}
	}

	for name, values := range result {
		splitValues := []string{}
		for _, value := range values {
			//split only if valid CSV between [...]
			//the len check guards against indexing an empty value,
			//e.g. from a URL like "?a=" where the param value is ""
			if len(value) >= 2 && value[0] == '[' && value[len(value)-1] == ']' {
				csvReader := csv.NewReader(strings.NewReader(value[1 : len(value)-1]))
				csvValues, csvErr := csvReader.Read() //this automatically removes quotes around some/all CSV inside the [...]
				if csvErr == nil {
					splitValues = append(splitValues, csvValues...)
				} else {
					splitValues = append(splitValues, value) //cannot split this "[...]" value
				}
			} else {
				splitValues = append(splitValues, value) //not a "[...]" value
			}
		}
		result[name] = splitValues
	}
	return result
}
+
+// Purpose:
+// 	UnmarshalNamedValues() parses a set of named values into a struct using json tag matching
+//  Unlike json.Unmarshal(), it takes care of converting quoted "true" -> true, "1" -> int(1) etc...
+//
+//	Typically used to parse environment or URL params into a struct
+//	because normal json.Unmarshal() will fail to parse "2" into an integer etc
+//
+//	By convention, the names should be lowercase to match json tag with "_" delimeters
+//	And also use "_" for nested sub-struct names
+//	  named value "a_b_c_d":5 would be stored in
+//    field with json tag "a_b_c_d" or
+//	  field with json tag "a_b"        which is a struct with a json tagged field "c_d" etc...
+//
+// Parameters:
+// 	namedValues is name-value pairs, typical from URL params or OS environment
+//		see construction functions for this:
+//			NamedValuesFromEnv()
+//			NamedValuesFromURL()
+//
+// 	structPtr must be ptr to a struct variable
+//		undefined values will not be changed, so you can call this multiple times on the
+//		same struct to amend a few values, leaving the rest and default values unchanged
+//
+// Return:
+//	unused values
+// 	nil or error if some values could not be used
+//
+//	If all values must be used, check len(unusedValues) when err==nil
+//
+func UnmarshalNamedValues(namedValues map[string][]string, structPtr interface{}) (unusedValues map[string][]string, err error) {
+	if structPtr == nil {
+		return nil, errors.Errorf("cannot unmarshal into nil")
+	}
+	structPtrType := reflect.TypeOf(structPtr)
+	if structPtrType.Kind() != reflect.Ptr || structPtrType.Elem().Kind() != reflect.Struct {
+		return nil, errors.Errorf("%T is not &struct", structPtr)
+	}
+	structType := structPtrType.Elem()
+	structPtrValue := reflect.ValueOf(structPtr)
+
+	if usedNameList, err := unmarshalNamedValuesIntoStructPtr("", namedValues, structType, structPtrValue); err != nil {
+		return namedValues, err
+	} else {
+		for _, usedName := range usedNameList {
+			delete(namedValues, usedName)
+		}
+	}
+	return namedValues, nil
+}
+
+func unmarshalNamedValuesIntoStructPtr(prefix string, namedValues map[string][]string, structType reflect.Type, structPtrValue reflect.Value) (usedNameList []string, err error) {
+	usedNameList = []string{}
+	for i := 0; i < structType.NumField(); i++ {
+		structTypeField := structType.Field(i)
+		fieldName := (strings.SplitN(structTypeField.Tag.Get("json"), ",", 2))[0]
+		if fieldName == "-" {
+			continue //skip fields excluded from JSON
+		}
+		if prefix != "" {
+			if fieldName != "" {
+				fieldName = prefix + "_" + fieldName
+			} else {
+				fieldName = prefix
+			}
+		}
+
+		//recurse into anonymous sub-structs
+		if structTypeField.Type.Kind() == reflect.Struct {
+			if nameList, err := unmarshalNamedValuesIntoStructPtr(fieldName, namedValues, structTypeField.Type, structPtrValue.Elem().Field(i).Addr()); err != nil {
+				return nil, errors.Wrapf(err, "failed on %s.%s", structType.Name(), fieldName)
+			} else {
+				usedNameList = append(usedNameList, nameList...)
+			}
+			continue
+		}
+
+		fieldValues, ok := namedValues[fieldName]
+		if !ok {
+			continue //skip undefined fields
+		}
+		usedNameList = append(usedNameList, fieldName)
+		delete(namedValues, fieldName)
+		if len(fieldValues) == 0 {
+			continue //field has no value specified in URL, do not remove values not defined (cannot clear defined struct fields with named values)
+		}
+		structPtrFieldValue := structPtrValue.Elem().Field(i)
+		if structPtrFieldValue.Kind() == reflect.Ptr {
+			//this is a ptr, allocate a new value and set it
+			//then we can dereference to set it below
+			structPtrFieldValue.Set(reflect.New(structPtrFieldValue.Type().Elem()))
+			structPtrFieldValue = structPtrFieldValue.Elem()
+		}
+
+		//param is defined >=1 times in URL
+		if structTypeField.Type.Kind() == reflect.Slice {
+			//this param struct field is a slice, iterate over all specified values
+			for i, fieldValue := range fieldValues {
+				parsedValue, parseErr := unmarshalValue(fieldValue, structTypeField.Type.Elem())
+				if parseErr != nil {
+					err = errors.Wrapf(parseErr, "invalid %s[%d]", fieldName, i)
+					return
+				}
+				structPtrFieldValue.Set(reflect.Append(structPtrFieldValue, parsedValue))
+
+				//todo: sorting of list using value names as applicable
+			}
+		} else {
+			//field is not a slice, expecting only a single value
+			if len(fieldValues) > 1 {
+				err = errors.Errorf("%s cannot store multiple value (%d found: %+v)", fieldName, len(fieldValues), fieldValues)
+				return
+			}
+			parsedValue, parseErr := unmarshalValue(fieldValues[0], structPtrFieldValue.Type())
+			if parseErr != nil {
+				err = errors.Wrapf(parseErr, "invalid %s", fieldName)
+				return
+			}
+			structPtrFieldValue.Set(parsedValue)
+		}
+	} //for each param struct field
+	return usedNameList, nil
+}
+
+func unmarshalValue(v interface{}, t reflect.Type) (reflect.Value, error) {
+	newValuePtr := reflect.New(t)
+	if reflect.ValueOf(v).Type().AssignableTo(t) {
+		newValuePtr.Elem().Set(reflect.ValueOf(v)) //can assign as is
+	} else {
+		//needs conversion
+		s, ok := v.(string)
+		if !ok {
+			jsonValue, _ := json.Marshal(v)
+			s = string(jsonValue)
+		}
+		//is string value, unmarshal as quoted or unquoted JSON value
+		if err := json.Unmarshal([]byte("\""+s+"\""), newValuePtr.Interface()); err != nil {
+			if err := json.Unmarshal([]byte(s), newValuePtr.Interface()); err != nil {
+				return newValuePtr.Elem(), errors.Wrapf(err, "invalid \"%s\"", s)
+			}
+		}
+	}
+	return newValuePtr.Elem(), nil
+}
diff --git a/struct_utils/named_values_to_struct_test.go b/struct_utils/named_values_to_struct_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..533cb16b58a1a1c194815718f2ff92141f18129a
--- /dev/null
+++ b/struct_utils/named_values_to_struct_test.go
@@ -0,0 +1,210 @@
+package struct_utils_test
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	"gitlab.com/uafrica/go-utils/errors"
+	"gitlab.com/uafrica/go-utils/logger"
+	"gitlab.com/uafrica/go-utils/struct_utils"
+)
+
+func TestEnv(t *testing.T) {
+	logger.SetGlobalFormat(logger.NewConsole())
+	logger.SetGlobalLevel(logger.LevelDebug)
+	//booleans
+	os.Setenv("TEST_VALUE_ENABLE_CACHE", "true")
+	os.Setenv("TEST_VALUE_DISABLE_LOG", "true")
+	os.Setenv("TEST_VALUE_ADMIN", "false")
+
+	//integers
+	os.Setenv("TEST_VALUE_MAX_SIZE", "12")
+
+	os.Setenv("TEST_VALUE_SEQ1", "[4,5,6]") //list in one value
+
+	os.Setenv("TEST_VALUE_SEQ2_10", "10") //numbered list elements
+	os.Setenv("TEST_VALUE_SEQ2_20", "20")
+	os.Setenv("TEST_VALUE_SEQ2_4", "4")
+	os.Setenv("TEST_VALUE_SEQ2_15", "15")
+	os.Setenv("TEST_VALUE_SEQ2", "100")
+
+	os.Setenv("TEST_VALUE_CUTOFF", "2021-11-20T12:00:00+02:00")
+	os.Setenv("TEST_VALUE_HOLIDAYS", "[2021-03-21,2021-04-27,2021-05-01,2021-06-16,2021-08-09,2021-12-16,2021-12-25]")
+
+	//=====[ TEST THIS FUNCTION ]=====
+	nv := struct_utils.NamedValuesFromEnv("TEST_VALUE")
+
+	testNamedValues(t, nv)
+}
+
+func TestURL1(t *testing.T) {
+	logger.SetGlobalFormat(logger.NewConsole())
+	logger.SetGlobalLevel(logger.LevelDebug)
+
+	queryParams := map[string]string{
+		"enable_cache": "true",
+		"disable_log":  "true",
+		"admin":        "false",
+		"max_size":     "12",
+		"seq1":         "[4,5,6]",
+		"seq2":         "[100,4,10,15,20]", //url does not support _# numbering of params in a list, only [csv] or multi-value with seq2=100&seq2=4&... testing in TestURL2()
+		"cutoff":       "2021-11-20T12:00:00+02:00",
+		"holidays":     "[2021-03-21,2021-04-27,2021-05-01,2021-06-16,2021-08-09,2021-12-16,2021-12-25]",
+	}
+
+	//=====[ TEST THIS FUNCTION ]=====
+	nv := struct_utils.NamedValuesFromURL(queryParams, nil)
+	testNamedValues(t, nv)
+}
+
+func TestURL2(t *testing.T) {
+	logger.SetGlobalFormat(logger.NewConsole())
+	logger.SetGlobalLevel(logger.LevelDebug)
+
+	queryParams := map[string]string{
+		"disable_log": "true",
+		"admin":       "false",
+		"max_size":    "12",
+		"seq1":        "4",
+		"cutoff":      "2021-11-20T12:00:00+02:00",
+	}
+	multiValueParams := map[string][]string{
+		"enable_cache": {"true"},
+		"seq1":         {"5", "6"}, //merged with above "4"
+		"seq2":         {"100", "4", "10", "15", "20"},
+		"holidays":     {"2021-03-21", "2021-04-27", "2021-05-01", "2021-06-16", "2021-08-09", "2021-12-16", "2021-12-25"},
+	}
+
+	//=====[ TEST THIS FUNCTION ]=====
+	nv := struct_utils.NamedValuesFromURL(queryParams, multiValueParams)
+	testNamedValues(t, nv)
+}
+
+func TestURL3(t *testing.T) {
+	urlString := "/test?admin=false&cutoff=2021-11-20T12%3A00%3A00%2B02%3A00&disable_log=true&enable_cache=true&holidays=2021-03-21&holidays=2021-04-27&holidays=2021-05-01&holidays=2021-06-16&holidays=2021-08-09&holidays=2021-12-16&holidays=2021-12-25&max_size=12&seq1=4&seq1=5&seq1=6&seq2=100&seq2=4&seq2=10&seq2=15&seq2=20"
+	u, err := url.Parse(urlString)
+	if err != nil {
+		t.Fatalf("cannot parse URL")
+	}
+	nv := struct_utils.NamedValuesFromURL(nil, u.Query())
+	testNamedValues(t, nv)
+}
+
//testNamedValues asserts that nv holds exactly the named values produced by
//the TestEnv/TestURL* fixtures, then parses nv into a Config struct and
//asserts every parsed field value.
func testNamedValues(t *testing.T, nv map[string][]string) {
	//assert expected named values (order within each list is important)
	exp := map[string][]string{
		"enable_cache": {"true"},
		"disable_log":  {"true"},
		"admin":        {"false"},
		"max_size":     {"12"},
		"seq1":         {"4", "5", "6"},
		"seq2":         {"100", "4", "10", "15", "20"}, //order is important
		"cutoff":       {"2021-11-20T12:00:00+02:00"},
		"holidays":     {"2021-03-21", "2021-04-27", "2021-05-01", "2021-06-16", "2021-08-09", "2021-12-16", "2021-12-25"},
	}
	if len(nv) != len(exp) {
		t.Fatalf("len(nv)=%d != len(exp)=%d: %+v", len(nv), len(exp), nv)
	}

	for name, expValues := range exp {
		if envValues, ok := nv[name]; !ok {
			t.Fatalf("%s not set in %+v", name, nv)
		} else {
			if len(expValues) != len(envValues) {
				t.Fatalf("%s has %d != %d values (%v != %v)", name, len(envValues), len(expValues), envValues, expValues)
			}
			for i, v := range envValues {
				if v != expValues[i] {
					t.Fatalf("%s[%d] = \"%s\" != \"%s\"", name, i, v, expValues[i])
				}
			}
		}
	}

	//=====[ PARSE INTO STRUCT ]==========
	c := Config{}
	unused, err := struct_utils.UnmarshalNamedValues(nv, &c)
	if err != nil {
		t.Fatalf("failed: %+v", err)
	}
	//every named value must have been consumed by a Config field
	if len(unused) != 0 {
		t.Fatalf("unused: %+v", unused)
	}
	t.Logf("parsed struct: %+v", c)

	{
		//dump the parsed struct for visual inspection of the test output
		e := json.NewEncoder(os.Stdout)
		e.SetIndent("", "\t")
		e.Encode(c)
	}

	if !c.EnableCache || !c.DisableLog || c.Admin {
		t.Fatalf("wrong bool values: %+v", c)
	}
	if c.MaxSize != 12 {
		t.Fatalf("wrong nr values: %+v", c)
	}
	if len(c.Seq1) != 3 || c.Seq1[0] != 4 || c.Seq1[1] != 5 || c.Seq1[2] != 6 {
		t.Fatalf("wrong seq1: %+v", c)
	}
	if len(c.Seq2) != 5 || c.Seq2[0] != 100 || c.Seq2[1] != 4 || c.Seq2[2] != 10 || c.Seq2[3] != 15 || c.Seq2[4] != 20 {
		t.Fatalf("wrong seq2: %+v", c)
	}
	//2021-11-20T12:00:00+02:00 is 10:00 UTC
	if c.Cutoff.UTC().Format("2006-01-02 15:04:05") != "2021-11-20 10:00:00" {
		t.Fatalf("wrong cutoff")
	}
	if len(c.Holidays) != 7 ||
		c.Holidays[0].String() != "2021-03-21" ||
		c.Holidays[1].String() != "2021-04-27" ||
		c.Holidays[2].String() != "2021-05-01" ||
		c.Holidays[3].String() != "2021-06-16" ||
		c.Holidays[4].String() != "2021-08-09" ||
		c.Holidays[5].String() != "2021-12-16" ||
		c.Holidays[6].String() != "2021-12-25" {
		t.Fatalf("wrong holidays")
	}
}
+
//Config is the target struct for the named-value parsing tests, covering
//bools, an int, int slices, a time.Time and a custom JSON-unmarshalled type.
type Config struct {
	EnableCache bool      `json:"enable_cache"`
	DisableLog  bool      `json:"disable_log"`
	Admin       bool      `json:"admin"`
	MaxSize     int64     `json:"max_size"`
	Seq1        []int     `json:"seq1"`
	Seq2        []int64   `json:"seq2"`
	Cutoff      time.Time `json:"cutoff"`
	Holidays    []Date    `json:"holidays"`
}
+
+type Date struct {
+	Y, M, D int
+}
+
+func (d *Date) Scan(value []byte) error {
+	s := strings.Trim(string(value), "\"")
+	v, err := time.ParseInLocation("2006-01-02", s, time.Now().Location())
+	if err != nil {
+		return errors.Errorf("%s is not CCYY-MM-DD", s)
+	}
+	d.Y = v.Year()
+	d.M = int(v.Month())
+	d.D = v.Day()
+	return nil
+}
+
//UnmarshalJSON implements json.Unmarshaler by delegating to Scan.
func (d *Date) UnmarshalJSON(value []byte) error {
	return d.Scan(value)
}
+
+func (d Date) String() string {
+	return fmt.Sprintf("%04d-%02d-%02d", d.Y, d.M, d.D)
+}
+
+func (d Date) MarshalJSON() ([]byte, error) {
+	return []byte("\"" + d.String() + "\""), nil
+}