@@ -1,14 +1,14 @@
 package rpc
 
 import (
-	"testing"
-
 	"context"
+	"testing"
 
 	clustermock "github.com/projecteru2/core/cluster/mocks"
 	enginemock "github.com/projecteru2/core/engine/mocks"
 	enginetypes "github.com/projecteru2/core/engine/types"
 	pb "github.com/projecteru2/core/rpc/gen"
+	rpcmocks "github.com/projecteru2/core/rpc/mocks"
 	"github.com/projecteru2/core/types"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
@@ -70,3 +70,125 @@ func TestSetNodeTranform(t *testing.T) {
 	assert.Nil(t, err)
 	assert.Equal(t, 2, len(o.DeltaCPU))
 }
+
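+// TestRunAndWaitSync checks that a synchronous RunAndWait relays every
+// message from the cluster back to the client stream before returning.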
+func TestRunAndWaitSync(t *testing.T) {
+	v := newVibranium()
+
+	stream := &rpcmocks.CoreRPC_RunAndWaitServer{}
+	stream.On("Context").Return(context.Background())
+	stream.On("Recv").Return(&pb.RunAndWaitOptions{
+		DeployOptions: &pb.DeployOptions{
+			Name: "deploy",
+			Entrypoint: &pb.EntrypointOptions{
+				Name:    "entry",
+				Command: "ping",
+			},
+			Podname:      "pod",
+			Image:        "image",
+			OpenStdin:    false,
+			ResourceOpts: &pb.ResourceOptions{},
+		},
+		Cmd:   []byte("ping"),
+		Async: false,
+	}, nil)
+
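+	// Capture every message the server writes back to the client stream.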
+	rc := []*pb.AttachWorkloadMessage{}
+	streamSendMock := func(m *pb.AttachWorkloadMessage) error {
+		rc = append(rc, m)
+		return nil
+	}
+	stream.On("Send", mock.Anything).Return(streamSendMock)
+
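+	// Stub cluster.RunAndWait: emit two messages, then close the channel.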
+	runAndWait := func(_ context.Context, _ *types.DeployOptions, _ <-chan []byte) <-chan *types.AttachWorkloadMessage {
+		ch := make(chan *types.AttachWorkloadMessage)
+		go func() {
+			// first message to report workload id
+			ch <- &types.AttachWorkloadMessage{
+				WorkloadID:    "workloadidfortonic",
+				Data:          []byte(""),
+				StdStreamType: types.Stdout,
+			}
+			// second message to report output of process
+			ch <- &types.AttachWorkloadMessage{
+				WorkloadID:    "workloadidfortonic",
+				Data:          []byte("network not reachable"),
+				StdStreamType: types.Stdout,
+			}
+			close(ch)
+		}()
+		return ch
+	}
+	cluster := v.cluster.(*clustermock.Cluster)
+	cluster.On("RunAndWait", mock.Anything, mock.Anything, mock.Anything).Return(runAndWait, nil)
+
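+	// Sync mode: the RPC returns only after the message channel is drained,
+	// so the client sees both messages.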
+	err := v.RunAndWait(stream)
+	assert.NoError(t, err)
+	assert.Equal(t, 2, len(rc))
+
+	m1 := rc[0]
+	assert.Equal(t, "workloadidfortonic", m1.WorkloadId)
+	assert.Equal(t, []byte(""), m1.Data)
+
+	m2 := rc[1]
+	assert.Equal(t, "workloadidfortonic", m2.WorkloadId)
+	assert.Equal(t, []byte("network not reachable"), m2.Data)
+}
+
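+// TestRunAndWaitAsync checks that an asynchronous RunAndWait returns after
+// relaying only the first message, which carries the workload ID.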
+func TestRunAndWaitAsync(t *testing.T) {
+	v := newVibranium()
+
+	stream := &rpcmocks.CoreRPC_RunAndWaitServer{}
+	stream.On("Context").Return(context.Background())
+	stream.On("Recv").Return(&pb.RunAndWaitOptions{
+		DeployOptions: &pb.DeployOptions{
+			Name: "deploy",
+			Entrypoint: &pb.EntrypointOptions{
+				Name:    "entry",
+				Command: "ping",
+			},
+			Podname:      "pod",
+			Image:        "image",
+			OpenStdin:    false,
+			ResourceOpts: &pb.ResourceOptions{},
+		},
+		Cmd:   []byte("ping"),
+		Async: true,
+	}, nil)
+
+	rc := []*pb.AttachWorkloadMessage{}
+	streamSendMock := func(m *pb.AttachWorkloadMessage) error {
+		rc = append(rc, m)
+		return nil
+	}
+	stream.On("Send", mock.Anything).Return(streamSendMock)
+
+	runAndWait := func(_ context.Context, _ *types.DeployOptions, _ <-chan []byte) <-chan *types.AttachWorkloadMessage {
+		ch := make(chan *types.AttachWorkloadMessage)
+		go func() {
+			// first message to report workload id
+			ch <- &types.AttachWorkloadMessage{
+				WorkloadID:    "workloadidfortonic",
+				Data:          []byte(""),
+				StdStreamType: types.Stdout,
+			}
+			// second message to report output of process
+			ch <- &types.AttachWorkloadMessage{
+				WorkloadID:    "workloadidfortonic",
+				Data:          []byte("network not reachable"),
+				StdStreamType: types.Stdout,
+			}
+			close(ch)
+		}()
+		return ch
+	}
+	cluster := v.cluster.(*clustermock.Cluster)
+	cluster.On("RunAndWait", mock.Anything, mock.Anything, mock.Anything).Return(runAndWait, nil)
+
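+	// Async mode: only the first message (carrying the workload ID) reaches
+	// the client before the RPC returns.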
+	err := v.RunAndWait(stream)
+	assert.NoError(t, err)
+	assert.Equal(t, 1, len(rc))
+
+	m1 := rc[0]
+	assert.Equal(t, "workloadidfortonic", m1.WorkloadId)
+	assert.Equal(t, []byte(""), m1.Data)
+}
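A note on the mock wiring above: `Return(streamSendMock)` passes a function where a plain `error` would normally go. Mockery-generated mocks typically type-assert the stored return value and, if it is a function with a matching signature, invoke it with the call's actual arguments. Below is a minimal sketch of that pattern, assuming the usual mockery template; it is an illustration, not the generated file in `rpc/mocks`.

```go
package mocks

import (
	pb "github.com/projecteru2/core/rpc/gen"
	"github.com/stretchr/testify/mock"
)

// CoreRPC_RunAndWaitServer is a pared-down stand-in for the generated stream
// mock; the real one also embeds the gRPC server-stream interface.
type CoreRPC_RunAndWaitServer struct {
	mock.Mock
}

// Send records the call, then either invokes a configured function with the
// actual argument (the streamSendMock case above) or returns the stored error.
func (_m *CoreRPC_RunAndWaitServer) Send(msg *pb.AttachWorkloadMessage) error {
	ret := _m.Called(msg)
	if rf, ok := ret.Get(0).(func(*pb.AttachWorkloadMessage) error); ok {
		return rf(msg)
	}
	return ret.Error(0)
}
```

This is why the tests can accumulate messages into `rc` without subclassing the stream: each `Send` call is routed through the supplied closure.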