Skip to content

Commit

Permalink
consolidate kafka lua pool init
Browse files — browse the repository at this point in the history
  • Loading branch information
serprex committed May 7, 2024
1 parent 3edd459 commit c175ea0
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 39 deletions.
29 changes: 19 additions & 10 deletions flow/connectors/kafka/kafka.go
Original file line number Diff line number Diff line change
Expand Up @@ -165,39 +165,48 @@ func lvalueToKafkaRecord(ls *lua.LState, value lua.LValue) (*kgo.Record, error)
return kr, nil
}

// NOTE(review): this span is a GitHub diff rendering whose +/- markers were
// lost in extraction, so pre-commit (removed) lines are interleaved with the
// post-commit code. Lines flagged "pre-commit (removed)" below are the old
// version; their unflagged neighbors are the replacement. This is not
// compilable as-is.
// pre-commit (removed): old SyncRecords header — this setup used to live inline there.
func (c *KafkaConnector) SyncRecords(ctx context.Context, req *model.SyncRecordsRequest[model.RecordItems]) (*model.SyncResponse, error) {
// pre-commit (removed): the cancel-cause context is now created by the caller
// and passed in as ctx plus the wgErr callback.
wgCtx, wgErr := context.WithCancelCause(ctx)
// createPool builds the shared Lua-state pool used by both SyncRecords and
// SyncQRepRecords (this commit consolidates the previously duplicated setup).
// Each pooled LState loads the user script; flushed record batches are
// produced to Kafka via c.client.Produce, and any produce error is reported
// through the caller-supplied wgErr callback.
func (c *KafkaConnector) createPool(
ctx context.Context,
script string,
flowJobName string,
wgErr func(error),
) (*utils.LPool[[]*kgo.Record], error) {
// Per-record produce callback: forward failures to the caller's error sink.
produceCb := func(_ *kgo.Record, err error) {
if err != nil {
wgErr(err)
}
}

// pre-commit (removed): these two locals moved into SyncRecords itself.
numRecords := atomic.Int64{}
tableNameRowsMapping := utils.InitialiseTableRowsMap(req.TableMappings)

// pre-commit (removed): old inline pool construction using wgCtx/req fields.
pool, err := utils.LuaPool(func() (*lua.LState, error) {
ls, err := utils.LoadScript(wgCtx, req.Script, func(ls *lua.LState) int {
return utils.LuaPool(func() (*lua.LState, error) {
ls, err := utils.LoadScript(ctx, script, func(ls *lua.LState) int {
// Stringify every value on the Lua stack and log the tab-joined
// result to the flow — presumably the script's print handler;
// verify against utils.LoadScript.
top := ls.GetTop()
ss := make([]string, top)
for i := range top {
ss[i] = ls.ToStringMeta(ls.Get(i + 1)).String()
}
// pre-commit (removed): logged via req.FlowJobName.
_ = c.LogFlowInfo(ctx, req.FlowJobName, strings.Join(ss, "\t"))
_ = c.LogFlowInfo(ctx, flowJobName, strings.Join(ss, "\t"))
return 0
})
if err != nil {
return nil, err
}
// pre-commit (removed): same default-handler check via req.Script.
if req.Script == "" {
// No user script: install the default onRecord handler.
if script == "" {
ls.Env.RawSetString("onRecord", ls.NewFunction(utils.DefaultOnRecord))
}
return ls, nil
}, func(krs []*kgo.Record) {
// Flush callback: produce each buffered record asynchronously.
for _, kr := range krs {
// pre-commit (removed): produced with wgCtx directly.
c.client.Produce(wgCtx, kr, produceCb)
c.client.Produce(ctx, kr, produceCb)
}
})
}

// SyncRecords (post-commit version): now delegates Lua pool construction to
// createPool instead of building it inline.
// NOTE(review): truncated in this diff view ("Expand Down" follows) — the
// remainder of the function body is not shown here.
func (c *KafkaConnector) SyncRecords(ctx context.Context, req *model.SyncRecordsRequest[model.RecordItems]) (*model.SyncResponse, error) {
numRecords := atomic.Int64{}
tableNameRowsMapping := utils.InitialiseTableRowsMap(req.TableMappings)

// wgErr cancels wgCtx with a cause; createPool invokes it on the first
// produce error (see produceCb in createPool).
wgCtx, wgErr := context.WithCancelCause(ctx)
pool, err := c.createPool(wgCtx, req.Script, req.FlowJobName, wgErr)
if err != nil {
return nil, err
}
Expand Down
31 changes: 2 additions & 29 deletions flow/connectors/kafka/qrep.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,43 +27,16 @@ func (c *KafkaConnector) SyncQRepRecords(
// NOTE(review): diff hunk from qrep.go — the SyncQRepRecords signature starts
// above this view and the body continues past it ("Expand Down"). As in the
// kafka.go hunk, +/- markers were lost: lines flagged "pre-commit (removed)"
// are the old inline pool setup this commit replaces with one createPool call.
stream *model.QRecordStream,
) (int, error) {
startTime := time.Now()
// pre-commit (removed): numRecords now declared after the pool setup.
numRecords := atomic.Int64{}
schema := stream.Schema()

wgCtx, wgErr := context.WithCancelCause(ctx)
// pre-commit (removed): inline produce callback, now inside createPool.
produceCb := func(_ *kgo.Record, err error) {
if err != nil {
wgErr(err)
}
}

// pre-commit (removed): entire inline pool construction, duplicated from the
// old SyncRecords; superseded by the c.createPool call below.
pool, err := utils.LuaPool(func() (*lua.LState, error) {
ls, err := utils.LoadScript(wgCtx, config.Script, func(ls *lua.LState) int {
top := ls.GetTop()
ss := make([]string, top)
for i := range top {
ss[i] = ls.ToStringMeta(ls.Get(i + 1)).String()
}
_ = c.LogFlowInfo(ctx, config.FlowJobName, strings.Join(ss, "\t"))
return 0
})
if err != nil {
return nil, err
}
if config.Script == "" {
ls.Env.RawSetString("onRecord", ls.NewFunction(utils.DefaultOnRecord))
}
return ls, nil
}, func(krs []*kgo.Record) {
for _, kr := range krs {
c.client.Produce(wgCtx, kr, produceCb)
}
})
pool, err := c.createPool(wgCtx, config.Script, config.FlowJobName, wgErr)
if err != nil {
return 0, err
}
defer pool.Close()

numRecords := atomic.Int64{}
Loop:
for {
select {
Expand Down

0 comments on commit c175ea0

Please sign in to comment.