// MsgsToProcessedMsgs converts msgs to processed msgs.
// It splits each sender's msgs into chunks of 5 and creates a ProcessedMsgs entry for each chunk.
func MsgsToProcessedMsgs(queues map[string][]sdk.Msg) []btypes.ProcessedMsgs {
	res := make([]btypes.ProcessedMsgs, 0)
	for sender := range queues {
		msgs := queues[sender]
		for i := 0; i < len(msgs); i += 5 {
			end := i + 5
			if end > len(msgs) {
				end = len(msgs)
			}

			res = append(res, btypes.ProcessedMsgs{
				Sender:    sender,
				Msgs:      slices.Clone(msgs[i:end]),
				Timestamp: types.CurrentNanoTimestamp(),
				Save:      true,
			})
		}
	}
	return res
}
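
A minimal usage sketch, not from the source, assuming the standard testing/require imports and the cosmos-sdk sdk.Msg type used above: chunking happens per sender, so 12 queued msgs for a single sender should yield three ProcessedMsgs of sizes 5, 5, and 2.

func TestMsgsToProcessedMsgsChunking(t *testing.T) {
	// 12 msgs for one sender -> chunks of 5, 5, and 2
	queues := map[string][]sdk.Msg{
		"sender0": make([]sdk.Msg, 12),
	}

	batches := MsgsToProcessedMsgs(queues)
	require.Len(t, batches, 3)
	require.Len(t, batches[0].Msgs, 5)
	require.Len(t, batches[1].Msgs, 5)
	require.Len(t, batches[2].Msgs, 2)
}
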
// Key returns the db key for the processed msgs, derived solely from the Timestamp.
func (p ProcessedMsgs) Key() []byte {
	return prefixedProcessedMsgs(types.MustInt64ToUint64(p.Timestamp))
}
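
Because the key is derived from the timestamp alone, two ProcessedMsgs built in the same nanosecond map to the same db entry regardless of sender. A hedged sketch of that collision, assuming the same package and testing imports as above:

func TestProcessedMsgsKeyCollision(t *testing.T) {
	ts := types.CurrentNanoTimestamp()

	a := ProcessedMsgs{Sender: "sender0", Timestamp: ts, Save: true}
	b := ProcessedMsgs{Sender: "sender1", Timestamp: ts, Save: true}

	// The sender is not part of the key, so equal timestamps produce equal
	// keys and a later Set would overwrite the earlier entry.
	require.Equal(t, a.Key(), b.Key())
}
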
// DeleteProcessedMsgs deletes processed messages
func DeleteProcessedMsgs(db types.BasicDB, processedMsgs btypes.ProcessedMsgs) error {
	return db.Delete(processedMsgs.Key())
}
// SaveProcessedMsgsBatch saves all processed messages in the batch
func SaveProcessedMsgsBatch(db types.BasicDB, cdc codec.Codec, processedMsgsBatch []btypes.ProcessedMsgs) error {
	for _, processedMsgs := range processedMsgsBatch {
		if !processedMsgs.Save {
			continue
		}

		data, err := processedMsgs.Value(cdc)
		if err != nil {
			return err
		}

		err = db.Set(processedMsgs.Key(), data)
		if err != nil {
			return err
		}
	}
	return nil
}
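
A hedged round-trip sketch: only SaveProcessedMsgsBatch and DeleteProcessedMsgs come from the source; the in-memory db constructor and the childprovider codec are assumed from the test below. Batches with Save set to false are skipped, the rest are written under their timestamp keys and can be deleted individually.

func TestSaveAndDeleteProcessedMsgs(t *testing.T) {
	cdc, _, _ := childprovider.GetCodec("init") // assumed codec setup, mirroring the test below

	memdb, err := db.NewMemDB() // assumed in-memory BasicDB implementation
	require.NoError(t, err)
	defer func() {
		require.NoError(t, memdb.Close())
	}()

	batch := []btypes.ProcessedMsgs{
		{Sender: "sender0", Timestamp: types.CurrentNanoTimestamp(), Save: true},
		{Sender: "sender1", Timestamp: types.CurrentNanoTimestamp(), Save: false}, // skipped by SaveProcessedMsgsBatch
	}

	require.NoError(t, SaveProcessedMsgsBatch(memdb, cdc, batch))
	require.NoError(t, DeleteProcessedMsgs(memdb, batch[0]))
}
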
func TestEndBlockHandlerMultipleBatches(t *testing.T) {
	childCodec, _, _ := childprovider.GetCodec("init")

	child := NewMockChild(nil, childCodec, "sender0", "sender1", 1)
	db, err := db.NewMemDB()
	require.NoError(t, err)
	defer func() {
		require.NoError(t, db.Close())
	}()
	hostdb := db.WithPrefix([]byte("test_host"))
	hostNode := node.NewTestNode(nodetypes.NodeConfig{}, hostdb, nil, nil, nil, nil)

	h := Host{
		BaseHost: hostprovider.NewTestBaseHost(0, hostNode, ophosttypes.QueryBridgeResponse{}, nodetypes.NodeConfig{}, nil),
		child:    child,
		stage:    hostdb.NewStage(),
	}

	msgQueue := h.GetMsgQueue()
	require.Empty(t, msgQueue["sender0"])

	// append enough messages to the msg queue to produce multiple batches
	numMessages := 100

	for i := 0; i < numMessages; i++ {
		h.AppendMsgQueue(&opchildtypes.MsgUpdateOracle{}, "sender0")
	}

	msgQueue = h.GetMsgQueue()
	require.Len(t, msgQueue["sender0"], numMessages)

	h.AppendProcessedMsgs(broadcaster.MsgsToProcessedMsgs(h.GetMsgQueue())...)

	msgs := h.GetProcessedMsgs()
	require.Len(t, msgs, 20) // 100 messages / 5 messages per batch = 20 batches

	// Verify that no two consecutive batches share a timestamp; because the db
	// key is derived from the timestamp alone, a collision would make later
	// batches overwrite earlier ones.
	for i := 0; i < len(msgs)-1; i++ {
		if msgs[i].Timestamp == msgs[i+1].Timestamp {
			// equal timestamps imply identical db keys
			require.Equal(t, msgs[i].Key(), msgs[i+1].Key(), fmt.Sprintf("expected identical DB keys for equal timestamps - %x vs %x", msgs[i].Key(), msgs[i+1].Key()))

			require.Fail(t, fmt.Sprintf("timestamps of batch %d and %d are the same: %d == %d", i, i+1, msgs[i].Timestamp, msgs[i+1].Timestamp))
		}
	}
}
