diff --git a/.env.ci b/.env.ci index 9052efc8..87f433b5 100644 --- a/.env.ci +++ b/.env.ci @@ -25,4 +25,6 @@ REDIS_ADDR=localhost:6379 DEBUG_LOG=true +JOB_INTERVAL_SEC=120 + HYDRA_ADMIN_URL=http://hydra:4445 diff --git a/.env.sample b/.env.sample index 52a1bed9..3da2448a 100644 --- a/.env.sample +++ b/.env.sample @@ -26,4 +26,6 @@ REDIS_ADDR=redis:6379 DEBUG_LOG=true +JOB_INTERVAL_SEC=120 + HYDRA_ADMIN_URL=http://hydra:4445 diff --git a/Gopkg.lock b/Gopkg.lock index b2c30a3c..7871d870 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -9,6 +9,22 @@ revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005" version = "v0.3.1" +[[projects]] + digest = "1:439bfb51db599cd80766736b93b3d10e9314361197ada0b1209a51627a00ccd5" + name = "github.com/PuerkitoBio/goquery" + packages = ["."] + pruneopts = "" + revision = "2d2796f41742ece03e8086188fa4db16a3a0b458" + version = "v1.5.0" + +[[projects]] + digest = "1:e3726ad6f38f710e84c8dcd0e830014de6eaeea81f28d91ae898afecc078479a" + name = "github.com/andybalholm/cascadia" + packages = ["."] + pruneopts = "" + revision = "901648c87902174f774fac311d7f176f8647bdaa" + version = "v1.0.0" + [[projects]] digest = "1:f1a75a8e00244e5ea77ff274baa9559eb877437b240ee7b278f3fc560d9f08bf" name = "github.com/dustin/go-humanize" @@ -409,6 +425,28 @@ revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe" version = "v1.1.2" +[[projects]] + digest = "1:0ae05e34cbf90e309af02a3f04d2fc3373eb3bed967fd270f5a7cc221d40db0c" + name = "github.com/mmcdole/gofeed" + packages = [ + ".", + "atom", + "extensions", + "internal/shared", + "rss", + ] + pruneopts = "" + revision = "42010a154d249f5f753dfe03a6390b59671e403e" + version = "v1.0.0-beta2" + +[[projects]] + branch = "master" + digest = "1:8067e0622d4e1c3ca83149a838fa85214c90c135db5bbafcf48587f82b3278e9" + name = "github.com/mmcdole/goxpp" + packages = ["."] + pruneopts = "" + revision = "0068e33feabfc0086c7aeb58a9603f91c061c89f" + [[projects]] branch = "master" digest = 
"1:5350b8f9bedc3e9f3d940abc6a3914c76b9ada1c0c3a8c13273d2eb2c7a16878" @@ -670,6 +708,9 @@ packages = [ "context", "context/ctxhttp", + "html", + "html/atom", + "html/charset", "http/httpguts", "http2", "http2/hpack", @@ -719,12 +760,24 @@ packages = [ "collate", "collate/build", + "encoding", + "encoding/charmap", + "encoding/htmlindex", + "encoding/internal", + "encoding/internal/identifier", + "encoding/japanese", + "encoding/korean", + "encoding/simplifiedchinese", + "encoding/traditionalchinese", + "encoding/unicode", "internal/colltab", "internal/gen", "internal/tag", "internal/triegen", "internal/ucd", + "internal/utf8internal", "language", + "runes", "secure/bidirule", "transform", "unicode/bidi", @@ -902,6 +955,7 @@ "github.com/golang/protobuf/proto", "github.com/golang/protobuf/protoc-gen-go", "github.com/golang/protobuf/ptypes/empty", + "github.com/golang/protobuf/ptypes/timestamp", "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway", "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger", "github.com/grpc-ecosystem/grpc-gateway/runtime", @@ -917,6 +971,7 @@ "github.com/kelseyhightower/envconfig", "github.com/lib/pq", "github.com/minio/minio-go", + "github.com/mmcdole/gofeed", "github.com/mwitkow/go-proto-validators", "github.com/mwitkow/go-proto-validators/protoc-gen-govalidators", "github.com/ory/hydra/sdk/go/hydra", diff --git a/api/entries.pb.go b/api/entries.pb.go new file mode 100644 index 00000000..e9691f3e --- /dev/null +++ b/api/entries.pb.go @@ -0,0 +1,351 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: entries.proto + +package api_pb // import "github.com/ProgrammingLab/prolab-accounts/api" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/golang/protobuf/ptypes/empty" +import timestamp "github.com/golang/protobuf/ptypes/timestamp" +import _ "github.com/mwitkow/go-proto-validators" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Entry struct { + EntryId uint32 `protobuf:"varint,1,opt,name=entry_id,json=entryId,proto3" json:"entry_id,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Content string `protobuf:"bytes,4,opt,name=content,proto3" json:"content,omitempty"` + Link string `protobuf:"bytes,5,opt,name=link,proto3" json:"link,omitempty"` + Author *User `protobuf:"bytes,6,opt,name=author,proto3" json:"author,omitempty"` + ImageUrl string `protobuf:"bytes,7,opt,name=image_url,json=imageUrl,proto3" json:"image_url,omitempty"` + PublishedAt *timestamp.Timestamp `protobuf:"bytes,8,opt,name=published_at,json=publishedAt,proto3" json:"published_at,omitempty"` + UpdatedAt *timestamp.Timestamp `protobuf:"bytes,9,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + Blog *Blog `protobuf:"bytes,10,opt,name=blog,proto3" 
json:"blog,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_entries_f8fe841f69ef3e33, []int{0} +} +func (m *Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Entry.Unmarshal(m, b) +} +func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Entry.Marshal(b, m, deterministic) +} +func (dst *Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_Entry.Merge(dst, src) +} +func (m *Entry) XXX_Size() int { + return xxx_messageInfo_Entry.Size(m) +} +func (m *Entry) XXX_DiscardUnknown() { + xxx_messageInfo_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_Entry proto.InternalMessageInfo + +func (m *Entry) GetEntryId() uint32 { + if m != nil { + return m.EntryId + } + return 0 +} + +func (m *Entry) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Entry) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Entry) GetContent() string { + if m != nil { + return m.Content + } + return "" +} + +func (m *Entry) GetLink() string { + if m != nil { + return m.Link + } + return "" +} + +func (m *Entry) GetAuthor() *User { + if m != nil { + return m.Author + } + return nil +} + +func (m *Entry) GetImageUrl() string { + if m != nil { + return m.ImageUrl + } + return "" +} + +func (m *Entry) GetPublishedAt() *timestamp.Timestamp { + if m != nil { + return m.PublishedAt + } + return nil +} + +func (m *Entry) GetUpdatedAt() *timestamp.Timestamp { + if m != nil { + return m.UpdatedAt + } + return nil +} + +func (m *Entry) GetBlog() *Blog { + if m != nil { + return m.Blog + } + return nil +} + +type ListEntriesRequest struct { + PageToken uint32 
`protobuf:"varint,1,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListEntriesRequest) Reset() { *m = ListEntriesRequest{} } +func (m *ListEntriesRequest) String() string { return proto.CompactTextString(m) } +func (*ListEntriesRequest) ProtoMessage() {} +func (*ListEntriesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_entries_f8fe841f69ef3e33, []int{1} +} +func (m *ListEntriesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListEntriesRequest.Unmarshal(m, b) +} +func (m *ListEntriesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListEntriesRequest.Marshal(b, m, deterministic) +} +func (dst *ListEntriesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListEntriesRequest.Merge(dst, src) +} +func (m *ListEntriesRequest) XXX_Size() int { + return xxx_messageInfo_ListEntriesRequest.Size(m) +} +func (m *ListEntriesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListEntriesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListEntriesRequest proto.InternalMessageInfo + +func (m *ListEntriesRequest) GetPageToken() uint32 { + if m != nil { + return m.PageToken + } + return 0 +} + +func (m *ListEntriesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +type ListEntriesResponse struct { + Entries []*Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + NextPageToken uint32 `protobuf:"varint,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListEntriesResponse) Reset() { *m = ListEntriesResponse{} } +func (m 
*ListEntriesResponse) String() string { return proto.CompactTextString(m) } +func (*ListEntriesResponse) ProtoMessage() {} +func (*ListEntriesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_entries_f8fe841f69ef3e33, []int{2} +} +func (m *ListEntriesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListEntriesResponse.Unmarshal(m, b) +} +func (m *ListEntriesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListEntriesResponse.Marshal(b, m, deterministic) +} +func (dst *ListEntriesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListEntriesResponse.Merge(dst, src) +} +func (m *ListEntriesResponse) XXX_Size() int { + return xxx_messageInfo_ListEntriesResponse.Size(m) +} +func (m *ListEntriesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListEntriesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListEntriesResponse proto.InternalMessageInfo + +func (m *ListEntriesResponse) GetEntries() []*Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *ListEntriesResponse) GetNextPageToken() uint32 { + if m != nil { + return m.NextPageToken + } + return 0 +} + +func init() { + proto.RegisterType((*Entry)(nil), "programming_lab.prolab_accounts.Entry") + proto.RegisterType((*ListEntriesRequest)(nil), "programming_lab.prolab_accounts.ListEntriesRequest") + proto.RegisterType((*ListEntriesResponse)(nil), "programming_lab.prolab_accounts.ListEntriesResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// EntryServiceClient is the client API for EntryService service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type EntryServiceClient interface { + ListPublicEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error) +} + +type entryServiceClient struct { + cc *grpc.ClientConn +} + +func NewEntryServiceClient(cc *grpc.ClientConn) EntryServiceClient { + return &entryServiceClient{cc} +} + +func (c *entryServiceClient) ListPublicEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error) { + out := new(ListEntriesResponse) + err := c.cc.Invoke(ctx, "/programming_lab.prolab_accounts.EntryService/ListPublicEntries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// EntryServiceServer is the server API for EntryService service. +type EntryServiceServer interface { + ListPublicEntries(context.Context, *ListEntriesRequest) (*ListEntriesResponse, error) +} + +func RegisterEntryServiceServer(s *grpc.Server, srv EntryServiceServer) { + s.RegisterService(&_EntryService_serviceDesc, srv) +} + +func _EntryService_ListPublicEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListEntriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntryServiceServer).ListPublicEntries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/programming_lab.prolab_accounts.EntryService/ListPublicEntries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EntryServiceServer).ListPublicEntries(ctx, req.(*ListEntriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _EntryService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "programming_lab.prolab_accounts.EntryService", + 
HandlerType: (*EntryServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListPublicEntries", + Handler: _EntryService_ListPublicEntries_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "entries.proto", +} + +func init() { proto.RegisterFile("entries.proto", fileDescriptor_entries_f8fe841f69ef3e33) } + +var fileDescriptor_entries_f8fe841f69ef3e33 = []byte{ + // 579 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x4f, 0x6b, 0xd4, 0x40, + 0x14, 0x27, 0x6d, 0xb7, 0xbb, 0x3b, 0xdb, 0xa5, 0xed, 0x28, 0x38, 0x6e, 0x95, 0x2e, 0x0b, 0x96, + 0xbd, 0x34, 0x81, 0xb6, 0x14, 0x44, 0x0a, 0x5a, 0xe8, 0x41, 0xe8, 0x61, 0x49, 0xdb, 0x8b, 0x97, + 0x30, 0x49, 0xc6, 0xec, 0xb0, 0xc9, 0xcc, 0x38, 0xf3, 0xd2, 0xba, 0xbd, 0x08, 0x7e, 0x05, 0x0f, + 0xe2, 0x37, 0xf0, 0xfb, 0xf8, 0x01, 0x04, 0xf1, 0x7b, 0x28, 0x33, 0x49, 0xea, 0xaa, 0x87, 0xd5, + 0x9c, 0xf2, 0x7e, 0xef, 0xfd, 0xde, 0xfb, 0xbd, 0x3f, 0x83, 0xfa, 0x4c, 0x80, 0xe6, 0xcc, 0xf8, + 0x4a, 0x4b, 0x90, 0x78, 0x57, 0x69, 0x99, 0x69, 0x5a, 0x14, 0x5c, 0x64, 0x51, 0x4e, 0x63, 0x0b, + 0xe7, 0x34, 0x8e, 0x68, 0x92, 0xc8, 0x52, 0x80, 0x19, 0x1c, 0x67, 0x1c, 0xa6, 0x65, 0xec, 0x27, + 0xb2, 0x08, 0x8a, 0x1b, 0x0e, 0x33, 0x79, 0x13, 0x64, 0x72, 0xdf, 0xb1, 0xf7, 0xaf, 0x69, 0xce, + 0x53, 0x0a, 0x52, 0x9b, 0xe0, 0xee, 0xb7, 0x4a, 0x3c, 0x78, 0x94, 0x49, 0x99, 0xe5, 0x2c, 0xa0, + 0x8a, 0x07, 0x54, 0x08, 0x09, 0x14, 0xb8, 0x14, 0x75, 0xd9, 0xc1, 0x4e, 0xed, 0x75, 0x56, 0x5c, + 0xbe, 0x0e, 0x58, 0xa1, 0x60, 0x5e, 0x3b, 0x77, 0xff, 0x74, 0x02, 0x2f, 0x98, 0x01, 0x5a, 0xa8, + 0x3a, 0x60, 0xab, 0x34, 0x4c, 0x47, 0x71, 0x2e, 0xb3, 0x26, 0x5f, 0xcf, 0x22, 0xb5, 0x31, 0xfa, + 0xb4, 0x8a, 0x5a, 0x67, 0x02, 0xf4, 0x1c, 0x3f, 0x44, 0x1d, 0xdb, 0xee, 0x3c, 0xe2, 0x29, 0xf1, + 0x86, 0xde, 0xb8, 0x1f, 0xb6, 0x9d, 0xfd, 0x32, 0xc5, 0xf7, 0x51, 0x0b, 0x38, 0xe4, 0x8c, 0xac, + 0x0c, 0xbd, 0x71, 0x37, 0xac, 0x0c, 0x3c, 0x44, 0xbd, 0x94, 0x99, 0x44, 0x73, 0x65, 0xd5, 
0x92, + 0x55, 0xe7, 0x5b, 0x84, 0x30, 0x41, 0xed, 0x44, 0x0a, 0x60, 0x02, 0xc8, 0x9a, 0xf3, 0x36, 0x26, + 0xc6, 0x68, 0x2d, 0xe7, 0x62, 0x46, 0x5a, 0x0e, 0x76, 0xff, 0xf8, 0x04, 0xad, 0xd3, 0x12, 0xa6, + 0x52, 0x93, 0xf5, 0xa1, 0x37, 0xee, 0x1d, 0x3c, 0xf1, 0x97, 0xcc, 0xdb, 0xbf, 0x32, 0x4c, 0x87, + 0x35, 0x09, 0xef, 0xa0, 0x2e, 0x2f, 0x68, 0xc6, 0xa2, 0x52, 0xe7, 0xa4, 0xed, 0xf2, 0x76, 0x1c, + 0x70, 0xa5, 0x73, 0x7c, 0x82, 0x36, 0x54, 0x19, 0xe7, 0xdc, 0x4c, 0x59, 0x1a, 0x51, 0x20, 0x1d, + 0x57, 0x61, 0xe0, 0x57, 0xd3, 0xf3, 0x9b, 0xe9, 0xf9, 0x97, 0xcd, 0xf4, 0xc2, 0xde, 0x5d, 0xfc, + 0x0b, 0xc0, 0x4f, 0x11, 0x2a, 0x55, 0x4a, 0xa1, 0x22, 0x77, 0x97, 0x92, 0xbb, 0x75, 0xb4, 0xa3, + 0xae, 0xd9, 0xe1, 0x13, 0xf4, 0x8f, 0x3d, 0x9d, 0xe6, 0x32, 0x0b, 0x1d, 0x65, 0x94, 0x20, 0x7c, + 0xce, 0x0d, 0x9c, 0x55, 0x47, 0x18, 0xb2, 0x37, 0x25, 0x33, 0x80, 0x1f, 0x23, 0xa4, 0x6c, 0x9b, + 0x20, 0x67, 0x4c, 0xd4, 0x9b, 0xea, 0x5a, 0xe4, 0xd2, 0x02, 0xd8, 0x47, 0xce, 0x88, 0x0c, 0xbf, + 0xad, 0xf6, 0xd5, 0x3a, 0xdd, 0xfe, 0xf6, 0x75, 0xb7, 0xbf, 0xf5, 0xa3, 0xf9, 0x3c, 0xc2, 0xc2, + 0x8e, 0x8d, 0xb9, 0xe0, 0xb7, 0x6c, 0xf4, 0x0e, 0xdd, 0xfb, 0xad, 0x88, 0x51, 0x52, 0x18, 0x86, + 0x9f, 0xa3, 0x76, 0x7d, 0xfc, 0xc4, 0x1b, 0xae, 0x8e, 0x7b, 0x07, 0x7b, 0x4b, 0x95, 0xbb, 0x33, + 0x0a, 0x1b, 0x1a, 0xde, 0x43, 0x9b, 0x82, 0xbd, 0x85, 0x68, 0x41, 0xec, 0x8a, 0x13, 0xdb, 0xb7, + 0xf0, 0xa4, 0x11, 0x7c, 0xf0, 0xd9, 0x43, 0x1b, 0x8e, 0x7a, 0xc1, 0xf4, 0x35, 0x4f, 0x18, 0xfe, + 0xe8, 0xa1, 0x6d, 0x2b, 0x69, 0x62, 0x17, 0x90, 0xd4, 0xc2, 0xf0, 0xe1, 0xd2, 0xfa, 0x7f, 0xcf, + 0x6a, 0x70, 0xf4, 0x7f, 0xa4, 0xaa, 0xf7, 0xd1, 0x83, 0xf7, 0x5f, 0xbe, 0x7f, 0x58, 0xd9, 0xc6, + 0x9b, 0x41, 0xdd, 0x4b, 0xe0, 0x6e, 0x21, 0x39, 0x3d, 0x7e, 0x75, 0xb4, 0xf0, 0xc2, 0x27, 0xbf, + 0x52, 0x9f, 0xd3, 0x38, 0xa8, 0x32, 0xef, 0x37, 0x99, 0xed, 0x63, 0x7e, 0x46, 0x15, 0x8f, 0x54, + 0x1c, 0xaf, 0xbb, 0x13, 0x39, 0xfc, 0x19, 0x00, 0x00, 0xff, 0xff, 0xa2, 0x39, 0x08, 0x1b, 0x50, + 0x04, 0x00, 0x00, +} diff --git 
a/api/entries.pb.gw.go b/api/entries.pb.gw.go new file mode 100644 index 00000000..2f78e964 --- /dev/null +++ b/api/entries.pb.gw.go @@ -0,0 +1,124 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: entries.proto + +/* +Package api_pb is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package api_pb + +import ( + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +var ( + filter_EntryService_ListPublicEntries_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_EntryService_ListPublicEntries_0(ctx context.Context, marshaler runtime.Marshaler, client EntryServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListEntriesRequest + var metadata runtime.ServerMetadata + + if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_EntryService_ListPublicEntries_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ListPublicEntries(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterEntryServiceHandlerFromEndpoint is same as RegisterEntryServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterEntryServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterEntryServiceHandler(ctx, mux, conn) +} + +// RegisterEntryServiceHandler registers the http handlers for service EntryService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterEntryServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterEntryServiceHandlerClient(ctx, mux, NewEntryServiceClient(conn)) +} + +// RegisterEntryServiceHandlerClient registers the http handlers for service EntryService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "EntryServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "EntryServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "EntryServiceClient" to call the correct interceptors. 
+func RegisterEntryServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client EntryServiceClient) error { + + mux.Handle("GET", pattern_EntryService_ListPublicEntries_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_EntryService_ListPublicEntries_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_EntryService_ListPublicEntries_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_EntryService_ListPublicEntries_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"entries", "public"}, "")) +) + +var ( + forward_EntryService_ListPublicEntries_0 = runtime.ForwardResponseMessage +) diff --git a/api/entries.swagger.json b/api/entries.swagger.json new file mode 100644 index 00000000..b117cc51 --- /dev/null +++ b/api/entries.swagger.json @@ -0,0 +1,191 @@ +{ + "swagger": "2.0", + "info": { + "title": "entries.proto", + "version": "version not set" + }, + "schemes": [ + "http", + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/entries/public": { + "get": { + "operationId": "ListPublicEntries", + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/prolab_accountsListEntriesResponse" + } + } + }, + "parameters": [ + { + "name": "page_token", + "in": "query", + "required": false, + "type": "integer", + "format": "int64" + }, + { + "name": "page_size", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + } + ], + "tags": [ + "EntryService" + ] + } + } + }, + "definitions": { + "prolab_accountsBlog": { + "type": "object", + "properties": { + "blog_id": { + "type": "integer", + "format": "int64" + }, + "url": { + "type": "string" + }, + "feed_url": { + "type": "string" + } + } + }, + "prolab_accountsEntry": { + "type": "object", + "properties": { + "entry_id": { + "type": "integer", + "format": "int64" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "content": { + "type": "string" + }, + "link": { + "type": "string" + }, + "author": { + "$ref": "#/definitions/prolab_accountsUser" + }, + "image_url": { + "type": "string" + }, + "published_at": { + "type": "string", + "format": "date-time" + }, + "updated_at": { + "type": "string", + "format": "date-time" + }, + "blog": { + "$ref": "#/definitions/prolab_accountsBlog" + } + } + 
}, + "prolab_accountsListEntriesResponse": { + "type": "object", + "properties": { + "entries": { + "type": "array", + "items": { + "$ref": "#/definitions/prolab_accountsEntry" + } + }, + "next_page_token": { + "type": "integer", + "format": "int64" + } + } + }, + "prolab_accountsProfileScope": { + "type": "string", + "enum": [ + "MEMBERS_ONLY", + "PUBLIC" + ], + "default": "MEMBERS_ONLY" + }, + "prolab_accountsUser": { + "type": "object", + "properties": { + "user_id": { + "type": "integer", + "format": "int64" + }, + "name": { + "type": "string" + }, + "email": { + "type": "string" + }, + "full_name": { + "type": "string" + }, + "icon_url": { + "type": "string" + }, + "description": { + "type": "string" + }, + "grade": { + "type": "integer", + "format": "int32" + }, + "left": { + "type": "boolean", + "format": "boolean" + }, + "role": { + "type": "string" + }, + "twitter_screen_name": { + "type": "string" + }, + "github_user_name": { + "type": "string" + }, + "department": { + "$ref": "#/definitions/typeDepartment" + }, + "profile_scope": { + "$ref": "#/definitions/prolab_accountsProfileScope" + } + } + }, + "typeDepartment": { + "type": "object", + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "name": { + "type": "string" + }, + "short_name": { + "type": "string" + } + } + } + } +} diff --git a/api/entries.validator.pb.go b/api/entries.validator.pb.go new file mode 100644 index 00000000..035daa5d --- /dev/null +++ b/api/entries.validator.pb.go @@ -0,0 +1,63 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: entries.proto + +package api_pb + +import ( + fmt "fmt" + math "math" + proto "github.com/golang/protobuf/proto" + _ "github.com/golang/protobuf/ptypes/empty" + _ "github.com/golang/protobuf/ptypes/timestamp" + _ "github.com/mwitkow/go-proto-validators" + _ "google.golang.org/genproto/googleapis/api/annotations" + github_com_mwitkow_go_proto_validators "github.com/mwitkow/go-proto-validators" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *Entry) Validate() error { + if this.Author != nil { + if err := github_com_mwitkow_go_proto_validators.CallValidatorIfExists(this.Author); err != nil { + return github_com_mwitkow_go_proto_validators.FieldError("Author", err) + } + } + if this.PublishedAt != nil { + if err := github_com_mwitkow_go_proto_validators.CallValidatorIfExists(this.PublishedAt); err != nil { + return github_com_mwitkow_go_proto_validators.FieldError("PublishedAt", err) + } + } + if this.UpdatedAt != nil { + if err := github_com_mwitkow_go_proto_validators.CallValidatorIfExists(this.UpdatedAt); err != nil { + return github_com_mwitkow_go_proto_validators.FieldError("UpdatedAt", err) + } + } + if this.Blog != nil { + if err := github_com_mwitkow_go_proto_validators.CallValidatorIfExists(this.Blog); err != nil { + return github_com_mwitkow_go_proto_validators.FieldError("Blog", err) + } + } + return nil +} +func (this *ListEntriesRequest) Validate() error { + if !(this.PageSize > -1) { + return github_com_mwitkow_go_proto_validators.FieldError("PageSize", fmt.Errorf(`value '%v' must be greater than '-1'`, this.PageSize)) + } + if !(this.PageSize < 101) { + return github_com_mwitkow_go_proto_validators.FieldError("PageSize", fmt.Errorf(`value '%v' must be less than '101'`, this.PageSize)) + } + return nil +} +func (this *ListEntriesResponse) Validate() error { + for _, item := range 
this.Entries { + if item != nil { + if err := github_com_mwitkow_go_proto_validators.CallValidatorIfExists(item); err != nil { + return github_com_mwitkow_go_proto_validators.FieldError("Entries", err) + } + } + } + return nil +} diff --git a/api/ping.pb.go b/api/ping.pb.go new file mode 100644 index 00000000..f81f6e62 --- /dev/null +++ b/api/ping.pb.go @@ -0,0 +1,190 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: ping.proto + +package api_pb // import "github.com/ProgrammingLab/prolab-accounts/api" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type PingRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PingRequest) Reset() { *m = PingRequest{} } +func (m *PingRequest) String() string { return proto.CompactTextString(m) } +func (*PingRequest) ProtoMessage() {} +func (*PingRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ping_223ad0c9ed55b304, []int{0} +} +func (m *PingRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PingRequest.Unmarshal(m, b) +} +func (m *PingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PingRequest.Marshal(b, m, deterministic) +} +func (dst *PingRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PingRequest.Merge(dst, src) +} +func (m *PingRequest) XXX_Size() int { + return xxx_messageInfo_PingRequest.Size(m) +} +func (m *PingRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PingRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PingRequest proto.InternalMessageInfo + +type Pong struct { + Pong string `protobuf:"bytes,1,opt,name=pong,proto3" json:"pong,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Pong) Reset() { *m = Pong{} } +func (m *Pong) String() string { return proto.CompactTextString(m) } +func (*Pong) ProtoMessage() {} +func (*Pong) Descriptor() ([]byte, []int) { + return fileDescriptor_ping_223ad0c9ed55b304, []int{1} +} +func (m *Pong) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Pong.Unmarshal(m, b) +} +func (m *Pong) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Pong.Marshal(b, m, deterministic) +} +func (dst *Pong) XXX_Merge(src proto.Message) { + xxx_messageInfo_Pong.Merge(dst, src) +} +func (m *Pong) XXX_Size() int { + return xxx_messageInfo_Pong.Size(m) +} +func (m *Pong) 
XXX_DiscardUnknown() { + xxx_messageInfo_Pong.DiscardUnknown(m) +} + +var xxx_messageInfo_Pong proto.InternalMessageInfo + +func (m *Pong) GetPong() string { + if m != nil { + return m.Pong + } + return "" +} + +func init() { + proto.RegisterType((*PingRequest)(nil), "programming_lab.prolab_accounts.PingRequest") + proto.RegisterType((*Pong)(nil), "programming_lab.prolab_accounts.Pong") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// PingServiceClient is the client API for PingService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type PingServiceClient interface { + Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*Pong, error) +} + +type pingServiceClient struct { + cc *grpc.ClientConn +} + +func NewPingServiceClient(cc *grpc.ClientConn) PingServiceClient { + return &pingServiceClient{cc} +} + +func (c *pingServiceClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*Pong, error) { + out := new(Pong) + err := c.cc.Invoke(ctx, "/programming_lab.prolab_accounts.PingService/Ping", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// PingServiceServer is the server API for PingService service. 
+type PingServiceServer interface { + Ping(context.Context, *PingRequest) (*Pong, error) +} + +func RegisterPingServiceServer(s *grpc.Server, srv PingServiceServer) { + s.RegisterService(&_PingService_serviceDesc, srv) +} + +func _PingService_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PingServiceServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/programming_lab.prolab_accounts.PingService/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PingServiceServer).Ping(ctx, req.(*PingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _PingService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "programming_lab.prolab_accounts.PingService", + HandlerType: (*PingServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Ping", + Handler: _PingService_Ping_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "ping.proto", +} + +func init() { proto.RegisterFile("ping.proto", fileDescriptor_ping_223ad0c9ed55b304) } + +var fileDescriptor_ping_223ad0c9ed55b304 = []byte{ + // 221 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xc8, 0xcc, 0x4b, + 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x2f, 0x28, 0xca, 0x4f, 0x2f, 0x4a, 0xcc, 0xcd, + 0xcd, 0xcc, 0x4b, 0x8f, 0xcf, 0x49, 0x4c, 0x02, 0x09, 0xe7, 0x24, 0x26, 0xc5, 0x27, 0x26, 0x27, + 0xe7, 0x97, 0xe6, 0x95, 0x14, 0x4b, 0xc9, 0xa4, 0xe7, 0xe7, 0xa7, 0xe7, 0xa4, 0xea, 0x27, 0x16, + 0x64, 0xea, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7, 0x15, 0x43, 0xb4, 0x2b, + 0xf1, 0x72, 0x71, 0x07, 0x64, 0xe6, 0xa5, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x28, 0x49, + 0x71, 0xb1, 0x04, 0xe4, 0xe7, 0xa5, 
0x0b, 0x09, 0x71, 0xb1, 0x14, 0xe4, 0xe7, 0xa5, 0x4b, 0x30, + 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x46, 0x95, 0x10, 0xa5, 0xc1, 0xa9, 0x45, 0x65, 0x99, + 0xc9, 0xa9, 0x42, 0x59, 0x5c, 0x2c, 0x20, 0xae, 0x90, 0x8e, 0x1e, 0x01, 0x17, 0xe8, 0x21, 0x59, + 0x20, 0xa5, 0x4a, 0x58, 0x75, 0x7e, 0x5e, 0xba, 0x12, 0x6f, 0xd3, 0xe5, 0x27, 0x93, 0x99, 0xd8, + 0x85, 0x58, 0xf5, 0x41, 0x5e, 0x75, 0x32, 0x8b, 0x32, 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, + 0x4b, 0xce, 0xcf, 0xd5, 0x0f, 0x40, 0x98, 0xe0, 0x93, 0x98, 0xa4, 0x0f, 0x31, 0x40, 0x17, 0x66, + 0x00, 0xc8, 0xa3, 0xd6, 0x89, 0x05, 0x99, 0xf1, 0x05, 0x49, 0x49, 0x6c, 0x60, 0x4f, 0x1a, 0x03, + 0x02, 0x00, 0x00, 0xff, 0xff, 0x62, 0x80, 0xdc, 0xd4, 0x31, 0x01, 0x00, 0x00, +} diff --git a/api/ping.pb.gw.go b/api/ping.pb.gw.go new file mode 100644 index 00000000..f94cb1fb --- /dev/null +++ b/api/ping.pb.gw.go @@ -0,0 +1,116 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: ping.proto + +/* +Package api_pb is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package api_pb + +import ( + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_PingService_Ping_0(ctx context.Context, marshaler runtime.Marshaler, client PingServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq PingRequest + var metadata runtime.ServerMetadata + + msg, err := client.Ping(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterPingServiceHandlerFromEndpoint is same as RegisterPingServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterPingServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterPingServiceHandler(ctx, mux, conn) +} + +// RegisterPingServiceHandler registers the http handlers for service PingService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
+func RegisterPingServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterPingServiceHandlerClient(ctx, mux, NewPingServiceClient(conn)) +} + +// RegisterPingServiceHandlerClient registers the http handlers for service PingService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "PingServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "PingServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "PingServiceClient" to call the correct interceptors. +func RegisterPingServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client PingServiceClient) error { + + mux.Handle("GET", pattern_PingService_Ping_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_PingService_Ping_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_PingService_Ping_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_PingService_Ping_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0}, []string{"ping"}, "")) +) + +var ( + forward_PingService_Ping_0 = runtime.ForwardResponseMessage +) diff --git a/api/ping.swagger.json b/api/ping.swagger.json new file mode 100644 index 00000000..aadebe21 --- /dev/null +++ b/api/ping.swagger.json @@ -0,0 +1,45 @@ +{ + "swagger": "2.0", + "info": { + "title": "ping.proto", + "version": "version not set" + }, + "schemes": [ + "http", + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/ping": { + "get": { + "operationId": "Ping", + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/prolab_accountsPong" + } + } + }, + "tags": [ + "PingService" + ] + } + } + }, + "definitions": { + "prolab_accountsPong": { + "type": "object", + "properties": { + "pong": { + "type": "string" + } + } + } + } +} diff --git a/api/ping.validator.pb.go b/api/ping.validator.pb.go new file mode 100644 index 00000000..416b3274 --- /dev/null +++ b/api/ping.validator.pb.go @@ -0,0 +1,23 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: ping.proto + +package api_pb + +import ( + fmt "fmt" + math "math" + proto "github.com/golang/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *PingRequest) Validate() error { + return nil +} +func (this *Pong) Validate() error { + return nil +} diff --git a/api/protos/entries.proto b/api/protos/entries.proto new file mode 100644 index 00000000..a736ef9b --- /dev/null +++ b/api/protos/entries.proto @@ -0,0 +1,45 @@ +syntax = "proto3"; + +package programming_lab.prolab_accounts; + +option go_package = "github.com/ProgrammingLab/prolab-accounts/api;api_pb"; + + +import "github.com/mwitkow/go-proto-validators/validator.proto"; +import "google/api/annotations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; + +import "user_blogs.proto"; +import "users.proto"; + +service EntryService { + rpc ListPublicEntries (ListEntriesRequest) returns (ListEntriesResponse) { + option (google.api.http) = { + get: "/entries/public" + }; + } +} + +message Entry { + uint32 entry_id = 1; + string title = 2; + string description = 3; + string content = 4; + string link = 5; + User author = 6; + string image_url = 7; + google.protobuf.Timestamp published_at = 8; + google.protobuf.Timestamp updated_at = 9; + Blog blog = 10; +} + +message ListEntriesRequest { + uint32 page_token = 1; + int32 page_size = 2 [(validator.field) = {int_gt: -1, int_lt: 101}]; +} + +message ListEntriesResponse { + repeated Entry entries = 1; + uint32 next_page_token = 2; +} diff --git a/api/protos/ping.proto b/api/protos/ping.proto new file mode 100644 index 00000000..4f9d23e6 --- /dev/null +++ b/api/protos/ping.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package programming_lab.prolab_accounts; + +option go_package = "github.com/ProgrammingLab/prolab-accounts/api;api_pb"; + + +import "google/api/annotations.proto"; + +service PingService { + rpc Ping (PingRequest) returns (Pong) { + option (google.api.http) = { + get: "/ping" + }; + } +} + +message PingRequest {} + +message Pong { + string pong = 1; +} diff 
--git a/api/protos/user_blogs.proto b/api/protos/user_blogs.proto new file mode 100644 index 00000000..e21e8166 --- /dev/null +++ b/api/protos/user_blogs.proto @@ -0,0 +1,49 @@ +syntax = "proto3"; + +package programming_lab.prolab_accounts; + +option go_package = "github.com/ProgrammingLab/prolab-accounts/api;api_pb"; + + +import "google/api/annotations.proto"; +import "google/protobuf/empty.proto"; + +service UserBlogService { + rpc CreateUserBlog (CreateUserBlogRequest) returns (Blog) { + option (google.api.http) = { + post: "/user/blogs" + body: "*" + }; + } + rpc UpdateUserBlog (UpdateUserBlogRequest) returns (Blog) { + option (google.api.http) = { + patch: "/user/blogs/{blog.blog_id}" + body: "*" + }; + } + rpc DeleteUserBlog (DeleteUserBlogRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/user/blogs/{blog_id}" + }; + } +} + +message Blog { + uint32 blog_id = 1; + string url = 2; + string feed_url = 3; +} + +message CreateUserBlogRequest { + Blog blog = 1; + bool auto_detect_feed = 2; +} + +message UpdateUserBlogRequest { + Blog blog = 1; + bool auto_detect_feed = 2; +} + +message DeleteUserBlogRequest { + uint32 blog_id = 1; +} diff --git a/api/user_blogs.pb.go b/api/user_blogs.pb.go new file mode 100644 index 00000000..0a7764c9 --- /dev/null +++ b/api/user_blogs.pb.go @@ -0,0 +1,388 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: user_blogs.proto + +package api_pb // import "github.com/ProgrammingLab/prolab-accounts/api" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import empty "github.com/golang/protobuf/ptypes/empty" +import _ "google.golang.org/genproto/googleapis/api/annotations" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Blog struct { + BlogId uint32 `protobuf:"varint,1,opt,name=blog_id,json=blogId,proto3" json:"blog_id,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + FeedUrl string `protobuf:"bytes,3,opt,name=feed_url,json=feedUrl,proto3" json:"feed_url,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Blog) Reset() { *m = Blog{} } +func (m *Blog) String() string { return proto.CompactTextString(m) } +func (*Blog) ProtoMessage() {} +func (*Blog) Descriptor() ([]byte, []int) { + return fileDescriptor_user_blogs_017a6ac4ff8d807f, []int{0} +} +func (m *Blog) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Blog.Unmarshal(m, b) +} +func (m *Blog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Blog.Marshal(b, m, deterministic) +} +func (dst *Blog) XXX_Merge(src proto.Message) { + xxx_messageInfo_Blog.Merge(dst, src) +} +func (m *Blog) XXX_Size() int { + return xxx_messageInfo_Blog.Size(m) +} +func (m *Blog) XXX_DiscardUnknown() { + xxx_messageInfo_Blog.DiscardUnknown(m) +} + +var xxx_messageInfo_Blog proto.InternalMessageInfo + +func (m *Blog) GetBlogId() uint32 { + if m != nil { + return m.BlogId + } + return 0 +} + +func (m *Blog) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *Blog) GetFeedUrl() string { + if m != nil { + return m.FeedUrl + } + return "" +} + +type CreateUserBlogRequest struct { + Blog *Blog `protobuf:"bytes,1,opt,name=blog,proto3" json:"blog,omitempty"` + AutoDetectFeed 
bool `protobuf:"varint,2,opt,name=auto_detect_feed,json=autoDetectFeed,proto3" json:"auto_detect_feed,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateUserBlogRequest) Reset() { *m = CreateUserBlogRequest{} } +func (m *CreateUserBlogRequest) String() string { return proto.CompactTextString(m) } +func (*CreateUserBlogRequest) ProtoMessage() {} +func (*CreateUserBlogRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_blogs_017a6ac4ff8d807f, []int{1} +} +func (m *CreateUserBlogRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateUserBlogRequest.Unmarshal(m, b) +} +func (m *CreateUserBlogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateUserBlogRequest.Marshal(b, m, deterministic) +} +func (dst *CreateUserBlogRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateUserBlogRequest.Merge(dst, src) +} +func (m *CreateUserBlogRequest) XXX_Size() int { + return xxx_messageInfo_CreateUserBlogRequest.Size(m) +} +func (m *CreateUserBlogRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateUserBlogRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateUserBlogRequest proto.InternalMessageInfo + +func (m *CreateUserBlogRequest) GetBlog() *Blog { + if m != nil { + return m.Blog + } + return nil +} + +func (m *CreateUserBlogRequest) GetAutoDetectFeed() bool { + if m != nil { + return m.AutoDetectFeed + } + return false +} + +type UpdateUserBlogRequest struct { + Blog *Blog `protobuf:"bytes,1,opt,name=blog,proto3" json:"blog,omitempty"` + AutoDetectFeed bool `protobuf:"varint,2,opt,name=auto_detect_feed,json=autoDetectFeed,proto3" json:"auto_detect_feed,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateUserBlogRequest) Reset() { *m = UpdateUserBlogRequest{} } +func (m *UpdateUserBlogRequest) 
String() string { return proto.CompactTextString(m) } +func (*UpdateUserBlogRequest) ProtoMessage() {} +func (*UpdateUserBlogRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_blogs_017a6ac4ff8d807f, []int{2} +} +func (m *UpdateUserBlogRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdateUserBlogRequest.Unmarshal(m, b) +} +func (m *UpdateUserBlogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdateUserBlogRequest.Marshal(b, m, deterministic) +} +func (dst *UpdateUserBlogRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateUserBlogRequest.Merge(dst, src) +} +func (m *UpdateUserBlogRequest) XXX_Size() int { + return xxx_messageInfo_UpdateUserBlogRequest.Size(m) +} +func (m *UpdateUserBlogRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateUserBlogRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateUserBlogRequest proto.InternalMessageInfo + +func (m *UpdateUserBlogRequest) GetBlog() *Blog { + if m != nil { + return m.Blog + } + return nil +} + +func (m *UpdateUserBlogRequest) GetAutoDetectFeed() bool { + if m != nil { + return m.AutoDetectFeed + } + return false +} + +type DeleteUserBlogRequest struct { + BlogId uint32 `protobuf:"varint,1,opt,name=blog_id,json=blogId,proto3" json:"blog_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteUserBlogRequest) Reset() { *m = DeleteUserBlogRequest{} } +func (m *DeleteUserBlogRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteUserBlogRequest) ProtoMessage() {} +func (*DeleteUserBlogRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_user_blogs_017a6ac4ff8d807f, []int{3} +} +func (m *DeleteUserBlogRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteUserBlogRequest.Unmarshal(m, b) +} +func (m *DeleteUserBlogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) 
{ + return xxx_messageInfo_DeleteUserBlogRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteUserBlogRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteUserBlogRequest.Merge(dst, src) +} +func (m *DeleteUserBlogRequest) XXX_Size() int { + return xxx_messageInfo_DeleteUserBlogRequest.Size(m) +} +func (m *DeleteUserBlogRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteUserBlogRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteUserBlogRequest proto.InternalMessageInfo + +func (m *DeleteUserBlogRequest) GetBlogId() uint32 { + if m != nil { + return m.BlogId + } + return 0 +} + +func init() { + proto.RegisterType((*Blog)(nil), "programming_lab.prolab_accounts.Blog") + proto.RegisterType((*CreateUserBlogRequest)(nil), "programming_lab.prolab_accounts.CreateUserBlogRequest") + proto.RegisterType((*UpdateUserBlogRequest)(nil), "programming_lab.prolab_accounts.UpdateUserBlogRequest") + proto.RegisterType((*DeleteUserBlogRequest)(nil), "programming_lab.prolab_accounts.DeleteUserBlogRequest") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// UserBlogServiceClient is the client API for UserBlogService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type UserBlogServiceClient interface { + CreateUserBlog(ctx context.Context, in *CreateUserBlogRequest, opts ...grpc.CallOption) (*Blog, error) + UpdateUserBlog(ctx context.Context, in *UpdateUserBlogRequest, opts ...grpc.CallOption) (*Blog, error) + DeleteUserBlog(ctx context.Context, in *DeleteUserBlogRequest, opts ...grpc.CallOption) (*empty.Empty, error) +} + +type userBlogServiceClient struct { + cc *grpc.ClientConn +} + +func NewUserBlogServiceClient(cc *grpc.ClientConn) UserBlogServiceClient { + return &userBlogServiceClient{cc} +} + +func (c *userBlogServiceClient) CreateUserBlog(ctx context.Context, in *CreateUserBlogRequest, opts ...grpc.CallOption) (*Blog, error) { + out := new(Blog) + err := c.cc.Invoke(ctx, "/programming_lab.prolab_accounts.UserBlogService/CreateUserBlog", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userBlogServiceClient) UpdateUserBlog(ctx context.Context, in *UpdateUserBlogRequest, opts ...grpc.CallOption) (*Blog, error) { + out := new(Blog) + err := c.cc.Invoke(ctx, "/programming_lab.prolab_accounts.UserBlogService/UpdateUserBlog", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *userBlogServiceClient) DeleteUserBlog(ctx context.Context, in *DeleteUserBlogRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + out := new(empty.Empty) + err := c.cc.Invoke(ctx, "/programming_lab.prolab_accounts.UserBlogService/DeleteUserBlog", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// UserBlogServiceServer is the server API for UserBlogService service. 
+type UserBlogServiceServer interface { + CreateUserBlog(context.Context, *CreateUserBlogRequest) (*Blog, error) + UpdateUserBlog(context.Context, *UpdateUserBlogRequest) (*Blog, error) + DeleteUserBlog(context.Context, *DeleteUserBlogRequest) (*empty.Empty, error) +} + +func RegisterUserBlogServiceServer(s *grpc.Server, srv UserBlogServiceServer) { + s.RegisterService(&_UserBlogService_serviceDesc, srv) +} + +func _UserBlogService_CreateUserBlog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateUserBlogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserBlogServiceServer).CreateUserBlog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/programming_lab.prolab_accounts.UserBlogService/CreateUserBlog", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserBlogServiceServer).CreateUserBlog(ctx, req.(*CreateUserBlogRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserBlogService_UpdateUserBlog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateUserBlogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserBlogServiceServer).UpdateUserBlog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/programming_lab.prolab_accounts.UserBlogService/UpdateUserBlog", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserBlogServiceServer).UpdateUserBlog(ctx, req.(*UpdateUserBlogRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UserBlogService_DeleteUserBlog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(DeleteUserBlogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UserBlogServiceServer).DeleteUserBlog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/programming_lab.prolab_accounts.UserBlogService/DeleteUserBlog", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UserBlogServiceServer).DeleteUserBlog(ctx, req.(*DeleteUserBlogRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _UserBlogService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "programming_lab.prolab_accounts.UserBlogService", + HandlerType: (*UserBlogServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateUserBlog", + Handler: _UserBlogService_CreateUserBlog_Handler, + }, + { + MethodName: "UpdateUserBlog", + Handler: _UserBlogService_UpdateUserBlog_Handler, + }, + { + MethodName: "DeleteUserBlog", + Handler: _UserBlogService_DeleteUserBlog_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "user_blogs.proto", +} + +func init() { proto.RegisterFile("user_blogs.proto", fileDescriptor_user_blogs_017a6ac4ff8d807f) } + +var fileDescriptor_user_blogs_017a6ac4ff8d807f = []byte{ + // 430 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x93, 0xd1, 0x6a, 0xd4, 0x40, + 0x14, 0x86, 0x49, 0xb7, 0xb4, 0x75, 0x8a, 0x31, 0x0c, 0x6c, 0x5b, 0xa3, 0xd2, 0x12, 0x28, 0x2c, + 0x05, 0x67, 0x64, 0x95, 0x82, 0x7a, 0x57, 0xab, 0x20, 0xf4, 0x42, 0x22, 0x7b, 0xe3, 0xcd, 0x30, + 0x93, 0x9c, 0x8d, 0x81, 0x49, 0x26, 0x4e, 0x26, 0x82, 0xa8, 0x08, 0xde, 0xf8, 0x00, 0x82, 0x6f, + 0xe2, 0x93, 0xf8, 0x0a, 0x3e, 0x88, 0x9c, 0xd9, 0x5d, 0x74, 0xd9, 0x95, 0xcd, 0x5d, 0xaf, 0x92, + 0xf9, 0x27, 0xe7, 0xfc, 0xdf, 0xfc, 0x99, 0x43, 0xa2, 0xae, 0x05, 0x2b, 0x94, 0x36, 0x45, 0xcb, + 0x1a, 0x6b, 0x9c, 0xa1, 0xc7, 0x8d, 0x35, 0x85, 0x95, 0x55, 0x55, 0xd6, 0x85, 0xd0, 0x52, 
0xa1, + 0xac, 0xa5, 0x12, 0x32, 0xcb, 0x4c, 0x57, 0xbb, 0x36, 0xbe, 0x5b, 0x18, 0x53, 0x68, 0xe0, 0xb2, + 0x29, 0xb9, 0xac, 0x6b, 0xe3, 0xa4, 0x2b, 0x4d, 0x3d, 0x2f, 0x8f, 0xef, 0xcc, 0x77, 0xfd, 0x4a, + 0x75, 0x53, 0x0e, 0x55, 0xe3, 0x3e, 0xcc, 0x36, 0x93, 0x2b, 0xb2, 0x7d, 0xa1, 0x4d, 0x41, 0x0f, + 0xc9, 0x2e, 0x5a, 0x8a, 0x32, 0x3f, 0x0a, 0x4e, 0x82, 0xd1, 0xcd, 0x74, 0x07, 0x97, 0x2f, 0x73, + 0x1a, 0x91, 0x41, 0x67, 0xf5, 0xd1, 0xd6, 0x49, 0x30, 0xba, 0x91, 0xe2, 0x2b, 0xbd, 0x4d, 0xf6, + 0xa6, 0x00, 0xb9, 0x40, 0x79, 0xe0, 0xe5, 0x5d, 0x5c, 0x4f, 0xac, 0x4e, 0x3e, 0x91, 0xe1, 0x33, + 0x0b, 0xd2, 0xc1, 0xa4, 0x05, 0x8b, 0x7d, 0x53, 0x78, 0xd7, 0x41, 0xeb, 0xe8, 0x63, 0xb2, 0x8d, + 0xfd, 0x7c, 0xef, 0xfd, 0xf1, 0x29, 0xdb, 0x70, 0x22, 0xe6, 0x6b, 0x7d, 0x09, 0x1d, 0x91, 0x48, + 0x76, 0xce, 0x88, 0x1c, 0x1c, 0x64, 0x4e, 0xa0, 0x95, 0xa7, 0xd9, 0x4b, 0x43, 0xd4, 0x2f, 0xbd, + 0xfc, 0x02, 0x20, 0x47, 0xf7, 0x49, 0x93, 0x5f, 0x97, 0xfb, 0x03, 0x32, 0xbc, 0x04, 0x0d, 0xab, + 0xee, 0xff, 0x8b, 0x76, 0xfc, 0x73, 0x40, 0x6e, 0x2d, 0x3e, 0x7e, 0x0d, 0xf6, 0x7d, 0x99, 0x01, + 0xfd, 0x16, 0x90, 0x70, 0x39, 0x42, 0x7a, 0xbe, 0x91, 0x77, 0x6d, 0xe6, 0x71, 0xbf, 0x73, 0x26, + 0x07, 0x5f, 0x7f, 0xfd, 0xfe, 0xbe, 0x15, 0x25, 0xfb, 0x1c, 0x2f, 0x1e, 0xf7, 0x17, 0xef, 0x49, + 0x70, 0x46, 0x7f, 0x04, 0x24, 0x5c, 0x8e, 0xb3, 0x07, 0xc9, 0xda, 0xfc, 0xfb, 0x92, 0x9c, 0x7a, + 0x92, 0xe3, 0x71, 0xfc, 0x0f, 0x09, 0xff, 0x88, 0x0f, 0x36, 0x0f, 0xf0, 0x33, 0x82, 0x7d, 0x21, + 0xe1, 0x72, 0xd0, 0x3d, 0xb8, 0xd6, 0xfe, 0x99, 0xf8, 0x80, 0xcd, 0x46, 0x83, 0x2d, 0x46, 0x83, + 0x3d, 0xc7, 0xd1, 0x48, 0xee, 0x79, 0x90, 0xc3, 0xb3, 0xe1, 0x0a, 0x08, 0x32, 0x5c, 0x9c, 0xbf, + 0x79, 0x54, 0x94, 0xee, 0x6d, 0xa7, 0x58, 0x66, 0x2a, 0xfe, 0xea, 0xaf, 0xf5, 0x95, 0x54, 0x7c, + 0xe6, 0x7c, 0x7f, 0xe1, 0x8c, 0x33, 0xf9, 0x54, 0x36, 0xa5, 0x68, 0x94, 0xda, 0xf1, 0x36, 0x0f, + 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x9f, 0x0d, 0xe4, 0xbf, 0xe2, 0x03, 0x00, 0x00, +} diff --git a/api/user_blogs.pb.gw.go 
b/api/user_blogs.pb.gw.go new file mode 100644 index 00000000..4016ff1d --- /dev/null +++ b/api/user_blogs.pb.gw.go @@ -0,0 +1,244 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: user_blogs.proto + +/* +Package api_pb is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package api_pb + +import ( + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_UserBlogService_CreateUserBlog_0(ctx context.Context, marshaler runtime.Marshaler, client UserBlogServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq CreateUserBlogRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.CreateUserBlog(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_UserBlogService_UpdateUserBlog_0(ctx context.Context, marshaler runtime.Marshaler, client UserBlogServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq UpdateUserBlogRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = 
pathParams["blog.blog_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "blog.blog_id") + } + + err = runtime.PopulateFieldFromPath(&protoReq, "blog.blog_id", val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "blog.blog_id", err) + } + + msg, err := client.UpdateUserBlog(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_UserBlogService_DeleteUserBlog_0(ctx context.Context, marshaler runtime.Marshaler, client UserBlogServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteUserBlogRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["blog_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "blog_id") + } + + protoReq.BlogId, err = runtime.Uint32(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "blog_id", err) + } + + msg, err := client.DeleteUserBlog(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterUserBlogServiceHandlerFromEndpoint is same as RegisterUserBlogServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterUserBlogServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterUserBlogServiceHandler(ctx, mux, conn) +} + +// RegisterUserBlogServiceHandler registers the http handlers for service UserBlogService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterUserBlogServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterUserBlogServiceHandlerClient(ctx, mux, NewUserBlogServiceClient(conn)) +} + +// RegisterUserBlogServiceHandlerClient registers the http handlers for service UserBlogService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "UserBlogServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "UserBlogServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "UserBlogServiceClient" to call the correct interceptors. 
+func RegisterUserBlogServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client UserBlogServiceClient) error { + + mux.Handle("POST", pattern_UserBlogService_CreateUserBlog_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_UserBlogService_CreateUserBlog_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_UserBlogService_CreateUserBlog_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("PATCH", pattern_UserBlogService_UpdateUserBlog_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_UserBlogService_UpdateUserBlog_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_UserBlogService_UpdateUserBlog_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_UserBlogService_DeleteUserBlog_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_UserBlogService_DeleteUserBlog_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_UserBlogService_DeleteUserBlog_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_UserBlogService_CreateUserBlog_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"user", "blogs"}, "")) + + pattern_UserBlogService_UpdateUserBlog_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"user", "blogs", "blog.blog_id"}, "")) + + pattern_UserBlogService_DeleteUserBlog_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"user", "blogs", "blog_id"}, "")) +) + +var ( + forward_UserBlogService_CreateUserBlog_0 = runtime.ForwardResponseMessage + + forward_UserBlogService_UpdateUserBlog_0 = runtime.ForwardResponseMessage + + forward_UserBlogService_DeleteUserBlog_0 = runtime.ForwardResponseMessage +) diff --git a/api/user_blogs.swagger.json b/api/user_blogs.swagger.json new file mode 100644 index 00000000..26bf8076 --- /dev/null +++ b/api/user_blogs.swagger.json @@ -0,0 +1,149 @@ +{ + "swagger": "2.0", + "info": { + "title": "user_blogs.proto", + "version": "version not set" + }, + "schemes": [ + "http", + "https" + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": { + "/user/blogs": { + "post": { + "operationId": "CreateUserBlog", + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/prolab_accountsBlog" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/prolab_accountsCreateUserBlogRequest" + } + } + ], + "tags": [ + "UserBlogService" + ] + } + }, + "/user/blogs/{blog.blog_id}": { + "patch": { + "operationId": "UpdateUserBlog", + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/prolab_accountsBlog" + } + } + }, + "parameters": [ + { + "name": "blog.blog_id", + "in": "path", + "required": true, + "type": "integer", + "format": "int64" + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": 
"#/definitions/prolab_accountsUpdateUserBlogRequest" + } + } + ], + "tags": [ + "UserBlogService" + ] + } + }, + "/user/blogs/{blog_id}": { + "delete": { + "operationId": "DeleteUserBlog", + "responses": { + "200": { + "description": "", + "schema": { + "$ref": "#/definitions/protobufEmpty" + } + } + }, + "parameters": [ + { + "name": "blog_id", + "in": "path", + "required": true, + "type": "integer", + "format": "int64" + } + ], + "tags": [ + "UserBlogService" + ] + } + } + }, + "definitions": { + "prolab_accountsBlog": { + "type": "object", + "properties": { + "blog_id": { + "type": "integer", + "format": "int64" + }, + "url": { + "type": "string" + }, + "feed_url": { + "type": "string" + } + } + }, + "prolab_accountsCreateUserBlogRequest": { + "type": "object", + "properties": { + "blog": { + "$ref": "#/definitions/prolab_accountsBlog" + }, + "auto_detect_feed": { + "type": "boolean", + "format": "boolean" + } + } + }, + "prolab_accountsUpdateUserBlogRequest": { + "type": "object", + "properties": { + "blog": { + "$ref": "#/definitions/prolab_accountsBlog" + }, + "auto_detect_feed": { + "type": "boolean", + "format": "boolean" + } + } + }, + "protobufEmpty": { + "type": "object", + "description": "service Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n }\n\nThe JSON representation for `Empty` is empty JSON object `{}`.", + "title": "A generic empty message that you can re-use to avoid defining duplicated\nempty messages in your APIs. A typical example is to use it as the request\nor the response type of an API method. For instance:" + } + } +} diff --git a/api/user_blogs.validator.pb.go b/api/user_blogs.validator.pb.go new file mode 100644 index 00000000..7ccf522f --- /dev/null +++ b/api/user_blogs.validator.pb.go @@ -0,0 +1,41 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: user_blogs.proto + +package api_pb + +import ( + fmt "fmt" + math "math" + proto "github.com/golang/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + _ "github.com/golang/protobuf/ptypes/empty" + github_com_mwitkow_go_proto_validators "github.com/mwitkow/go-proto-validators" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *Blog) Validate() error { + return nil +} +func (this *CreateUserBlogRequest) Validate() error { + if this.Blog != nil { + if err := github_com_mwitkow_go_proto_validators.CallValidatorIfExists(this.Blog); err != nil { + return github_com_mwitkow_go_proto_validators.FieldError("Blog", err) + } + } + return nil +} +func (this *UpdateUserBlogRequest) Validate() error { + if this.Blog != nil { + if err := github_com_mwitkow_go_proto_validators.CallValidatorIfExists(this.Blog); err != nil { + return github_com_mwitkow_go_proto_validators.FieldError("Blog", err) + } + } + return nil +} +func (this *DeleteUserBlogRequest) Validate() error { + return nil +} diff --git a/api/users.validator.pb.go b/api/users.validator.pb.go index 94e2eb90..47b28938 100644 --- a/api/users.validator.pb.go +++ b/api/users.validator.pb.go @@ -7,10 +7,10 @@ import ( fmt "fmt" math "math" proto "github.com/golang/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - _ "github.com/golang/protobuf/ptypes/empty" _ "github.com/ProgrammingLab/prolab-accounts/api/type" _ "github.com/mwitkow/go-proto-validators" + _ "google.golang.org/genproto/googleapis/api/annotations" + _ "github.com/golang/protobuf/ptypes/empty" regexp "regexp" github_com_mwitkow_go_proto_validators "github.com/mwitkow/go-proto-validators" ) diff --git a/app/config/config.go b/app/config/config.go index 6458e89b..f1cadaf9 100644 --- a/app/config/config.go +++ 
b/app/config/config.go @@ -11,6 +11,7 @@ type Config struct { DataBaseURL string `envconfig:"database_url" required:"true"` RedisAddr string `envconfig:"redis_addr" required:"true"` DebugLog bool `envconfig:"debug_log"` + JobIntervalSec int `envconfig:"job_interval_sec" required:"true"` HydraAdminURL string `envconfig:"hydra_admin_url" required:"true"` MinioPublicURL string `envconfig:"minio_public_url" required:"true"` MinioEndpoint string `envconfig:"minio_endpoint" required:"true"` diff --git a/app/di/store_component.go b/app/di/store_component.go index 24bfc5a2..23e7f4a7 100644 --- a/app/di/store_component.go +++ b/app/di/store_component.go @@ -11,9 +11,13 @@ import ( "github.com/ProgrammingLab/prolab-accounts/app/config" "github.com/ProgrammingLab/prolab-accounts/infra/store" + entrystore "github.com/ProgrammingLab/prolab-accounts/infra/store/entry" + feedstore "github.com/ProgrammingLab/prolab-accounts/infra/store/feed" + heartbeatstore "github.com/ProgrammingLab/prolab-accounts/infra/store/heartbeat" profilestore "github.com/ProgrammingLab/prolab-accounts/infra/store/profile" sessionstore "github.com/ProgrammingLab/prolab-accounts/infra/store/session" userstore "github.com/ProgrammingLab/prolab-accounts/infra/store/user" + userblogstore "github.com/ProgrammingLab/prolab-accounts/infra/store/user_blog" ) // StoreComponent is an interface of stores @@ -21,6 +25,10 @@ type StoreComponent interface { UserStore(ctx context.Context) store.UserStore SessionStore(ctx context.Context) store.SessionStore ProfileStore(ctx context.Context) store.ProfileStore + UserBlogStore(ctx context.Context) store.UserBlogStore + FeedStore(ctx context.Context) store.FeedStore + EntryStore(ctx context.Context) store.EntryStore + HeartbeatStore(ctx context.Context) store.HeartbeatStore } // NewStoreComponent returns new store component @@ -133,3 +141,19 @@ func (s *storeComponentImpl) 
SessionStore(ctx context.Context) store.SessionStor func (s *storeComponentImpl) ProfileStore(ctx context.Context) store.ProfileStore { return profilestore.NewProfileStore(ctx, s.db) } + +func (s *storeComponentImpl) UserBlogStore(ctx context.Context) store.UserBlogStore { + return userblogstore.NewUserBlogStore(ctx, s.db) +} + +func (s *storeComponentImpl) FeedStore(ctx context.Context) store.FeedStore { + return feedstore.NewFeedStore(ctx) +} + +func (s *storeComponentImpl) EntryStore(ctx context.Context) store.EntryStore { + return entrystore.NewEntryStore(ctx, s.db) +} + +func (s *storeComponentImpl) HeartbeatStore(ctx context.Context) store.HeartbeatStore { + return heartbeatstore.NewHeartbeatStore(ctx, s.client, s.cfg) +} diff --git a/app/job/feed_job.go b/app/job/feed_job.go new file mode 100644 index 00000000..d737949b --- /dev/null +++ b/app/job/feed_job.go @@ -0,0 +1,43 @@ +package job + +import ( + "context" + "time" + + "google.golang.org/grpc/grpclog" + + "github.com/ProgrammingLab/prolab-accounts/app/di" +) + +func feedJob(ctx context.Context, store di.StoreComponent, debug bool) error { + bs := store.UserBlogStore(ctx) + blogs, err := bs.ListUserBlogs() + if err != nil { + return err + } + + fs := store.FeedStore(ctx) + es := store.EntryStore(ctx) + for _, b := range blogs { + if b.FeedURL == "" { + continue + } + feed, err := fs.GetFeed(b.FeedURL) + if err != nil { + grpclog.Errorf("feed job: failed to get feed: blog id: %v : %+v", b.ID, err) + continue + } + + n, err := es.CreateEntries(b, feed) + if err != nil { + return err + } + if debug { + grpclog.Infof("feed job: created %v entries", n) + } + + <-time.After(100 * time.Millisecond) + } + + return nil +} diff --git a/app/job/heartbeat_job.go b/app/job/heartbeat_job.go new file mode 100644 index 00000000..8cac0655 --- /dev/null +++ b/app/job/heartbeat_job.go @@ -0,0 +1,14 @@ +package job + +import ( + "context" + + "github.com/pkg/errors" + + 
"github.com/ProgrammingLab/prolab-accounts/app/di" +) + +func heartbeatJob(ctx context.Context, store di.StoreComponent, debug bool) error { + s := store.HeartbeatStore(ctx) + return errors.WithStack(s.Beat()) +} diff --git a/app/job/job.go b/app/job/job.go new file mode 100644 index 00000000..061a1b94 --- /dev/null +++ b/app/job/job.go @@ -0,0 +1,71 @@ +package job + +import ( + "context" + "time" + + "google.golang.org/grpc/grpclog" + + "github.com/ProgrammingLab/prolab-accounts/app/config" + "github.com/ProgrammingLab/prolab-accounts/app/di" + "github.com/ProgrammingLab/prolab-accounts/app/util" +) + +var ( + started = false + stop = make(chan struct{}) + jobs = []Job{ + feedJob, + heartbeatJob, + } +) + +// Job represents job for worker +type Job func(ctx context.Context, store di.StoreComponent, debug bool) error + +// Start starts the worker +func Start(store di.StoreComponent, cfg *config.Config) { + if started { + return + } + started = true + + go func() { + run(store, cfg) + }() +} + +// Close stops the worker +func Close() { + grpclog.Infoln("worker is stopping(^C to force to stop)") + stop <- struct{}{} +} + +func run(store di.StoreComponent, cfg *config.Config) { + interval := time.Duration(cfg.JobIntervalSec) * time.Second + + defer func() { + if err := util.ErrorFromRecover(recover()); err != nil { + grpclog.Errorf("job panic: %+v", err) + grpclog.Infoln("worker is restarting...") + run(store, cfg) + } + }() + + grpclog.Infof("worker started: interval %v", interval) + + for { + select { + case <-time.After(interval): + for _, j := range jobs { + err := j(context.Background(), store, cfg.DebugLog) + if err != nil { + grpclog.Errorf("job error: %+v", err) + } + } + case <-stop: + grpclog.Infoln("worker stopped") + return + } + } +} diff --git a/app/run.go b/app/run.go index 9d3a1c4c..8c74de93 100644 --- a/app/run.go +++ b/app/run.go @@ -9,6 +9,7 @@ import ( 
"github.com/ProgrammingLab/prolab-accounts/app/config" "github.com/ProgrammingLab/prolab-accounts/app/di" "github.com/ProgrammingLab/prolab-accounts/app/interceptor" + "github.com/ProgrammingLab/prolab-accounts/app/job" "github.com/ProgrammingLab/prolab-accounts/app/server" ) @@ -53,7 +54,14 @@ func Run() error { server.NewSessionServiceServer(store), server.NewUserServiceServer(store, cfg), server.NewOAuthServiceServer(cli, store), + server.NewUserBlogServiceServer(store), + server.NewEntryServiceServer(store, cfg), + server.NewPingServiceServer(store), ), ) + + job.Start(store, cfg) + defer job.Close() + return s.Serve() } diff --git a/app/server/entries_server.go b/app/server/entries_server.go new file mode 100644 index 00000000..7cb41149 --- /dev/null +++ b/app/server/entries_server.go @@ -0,0 +1,100 @@ +package server + +import ( + "context" + "database/sql" + "math" + "time" + + "github.com/golang/protobuf/ptypes/timestamp" + "github.com/izumin5210/grapi/pkg/grapiserver" + "github.com/pkg/errors" + + api_pb "github.com/ProgrammingLab/prolab-accounts/api" + "github.com/ProgrammingLab/prolab-accounts/app/config" + "github.com/ProgrammingLab/prolab-accounts/app/di" + "github.com/ProgrammingLab/prolab-accounts/infra/record" +) + +// EntryServiceServer is a composite interface of api_pb.EntryServiceServer and grapiserver.Server. +type EntryServiceServer interface { + api_pb.EntryServiceServer + grapiserver.Server +} + +// NewEntryServiceServer creates a new EntryServiceServer instance. 
+func NewEntryServiceServer(store di.StoreComponent, cfg *config.Config) EntryServiceServer {
+	return &entryServiceServerImpl{
+		StoreComponent: store,
+		cfg:            cfg,
+	}
+}
+
+type entryServiceServerImpl struct {
+	di.StoreComponent
+	cfg *config.Config
+}
+
+func (s *entryServiceServerImpl) ListPublicEntries(ctx context.Context, req *api_pb.ListEntriesRequest) (*api_pb.ListEntriesResponse, error) {
+	size := req.GetPageSize()
+	if size == 0 {
+		size = 50
+	}
+	maxID := req.GetPageToken()
+	if maxID == 0 {
+		maxID = math.MaxUint32
+	}
+
+	es := s.EntryStore(ctx)
+	entries, next, err := es.ListPublicEntries(int64(maxID), int(size))
+	if err != nil {
+		if errors.Cause(err) == sql.ErrNoRows {
+			return &api_pb.ListEntriesResponse{}, nil
+		}
+		return nil, err
+	}
+
+	resp := entriesToResponse(entries, false, s.cfg)
+	return &api_pb.ListEntriesResponse{
+		Entries:       resp,
+		NextPageToken: uint32(next),
+	}, nil
+}
+
+func entriesToResponse(entries []*record.Entry, includeEmail bool, cfg *config.Config) []*api_pb.Entry {
+	res := make([]*api_pb.Entry, 0, len(entries))
+	for _, e := range entries {
+		res = append(res, entryToResponse(e, includeEmail, cfg))
+	}
+
+	return res
+}
+
+func entryToResponse(entry *record.Entry, includeEmail bool, cfg *config.Config) *api_pb.Entry {
+	e := &api_pb.Entry{
+		EntryId:     uint32(entry.ID),
+		Title:       entry.Title,
+		Description: entry.Description,
+		Content:     entry.Content,
+		Link:        entry.Link,
+		ImageUrl:    entry.ImageURL,
+		UpdatedAt:   timeToResponse(entry.UpdatedAt),
+	}
+	if t := entry.PublishedAt; t.Valid {
+		e.PublishedAt = timeToResponse(t.Time)
+	}
+	if r := entry.R; r != nil {
+		e.Author = userToResponse(r.Author, includeEmail, cfg)
+		e.Blog = blogToResponse(r.Blog)
+	}
+
+	return e
+}
+
+func timeToResponse(t time.Time) *timestamp.Timestamp {
+	t = t.UTC()
+	return &timestamp.Timestamp{
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
+	}
+}
diff --git a/app/server/entries_server_register_funcs.go 
b/app/server/entries_server_register_funcs.go new file mode 100644 index 00000000..a4635fa6 --- /dev/null +++ b/app/server/entries_server_register_funcs.go @@ -0,0 +1,22 @@ +// Code generated by github.com/izumin5210/grapi. DO NOT EDIT. + +package server + +import ( + "context" + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "google.golang.org/grpc" + + api_pb "github.com/ProgrammingLab/prolab-accounts/api" +) + +// RegisterWithServer implements grapiserver.Server.RegisterWithServer. +func (s *entryServiceServerImpl) RegisterWithServer(grpcSvr *grpc.Server) { + api_pb.RegisterEntryServiceServer(grpcSvr, s) +} + +// RegisterWithHandler implements grapiserver.Server.RegisterWithHandler. +func (s *entryServiceServerImpl) RegisterWithHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return api_pb.RegisterEntryServiceHandler(ctx, mux, conn) +} diff --git a/app/server/entries_server_test.go b/app/server/entries_server_test.go new file mode 100644 index 00000000..abb4e431 --- /dev/null +++ b/app/server/entries_server_test.go @@ -0,0 +1 @@ +package server diff --git a/app/server/ping_server.go b/app/server/ping_server.go new file mode 100644 index 00000000..ed31bb00 --- /dev/null +++ b/app/server/ping_server.go @@ -0,0 +1,43 @@ +package server + +import ( + "context" + + "github.com/izumin5210/grapi/pkg/grapiserver" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + api_pb "github.com/ProgrammingLab/prolab-accounts/api" + "github.com/ProgrammingLab/prolab-accounts/app/di" +) + +// PingServiceServer is a composite interface of api_pb.PingServiceServer and grapiserver.Server. +type PingServiceServer interface { + api_pb.PingServiceServer + grapiserver.Server +} + +// NewPingServiceServer creates a new PingServiceServer instance. 
+func NewPingServiceServer(store di.StoreComponent) PingServiceServer {
+	return &pingServiceServerImpl{
+		StoreComponent: store,
+	}
+}
+
+type pingServiceServerImpl struct {
+	di.StoreComponent
+}
+
+var (
+	// ErrWorkerStopped is returned when the worker has stopped
+	ErrWorkerStopped = status.Error(codes.Internal, "worker has stopped")
+)
+
+func (s *pingServiceServerImpl) Ping(ctx context.Context, req *api_pb.PingRequest) (*api_pb.Pong, error) {
+	hs := s.HeartbeatStore(ctx)
+	err := hs.GetHeartbeat()
+	if err != nil {
+		return nil, ErrWorkerStopped
+	}
+	return &api_pb.Pong{}, nil
+}
diff --git a/app/server/ping_server_register_funcs.go b/app/server/ping_server_register_funcs.go
new file mode 100644
index 00000000..a90123d4
--- /dev/null
+++ b/app/server/ping_server_register_funcs.go
@@ -0,0 +1,22 @@
+// Code generated by github.com/izumin5210/grapi. DO NOT EDIT.
+
+package server
+
+import (
+	"context"
+
+	"github.com/grpc-ecosystem/grpc-gateway/runtime"
+	"google.golang.org/grpc"
+
+	api_pb "github.com/ProgrammingLab/prolab-accounts/api"
+)
+
+// RegisterWithServer implements grapiserver.Server.RegisterWithServer.
+func (s *pingServiceServerImpl) RegisterWithServer(grpcSvr *grpc.Server) {
+	api_pb.RegisterPingServiceServer(grpcSvr, s)
+}
+
+// RegisterWithHandler implements grapiserver.Server.RegisterWithHandler. 
+func (s *pingServiceServerImpl) RegisterWithHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return api_pb.RegisterPingServiceHandler(ctx, mux, conn) +} diff --git a/app/server/ping_server_test.go b/app/server/ping_server_test.go new file mode 100644 index 00000000..abb4e431 --- /dev/null +++ b/app/server/ping_server_test.go @@ -0,0 +1 @@ +package server diff --git a/app/server/user_blogs_server.go b/app/server/user_blogs_server.go new file mode 100644 index 00000000..9f75087a --- /dev/null +++ b/app/server/user_blogs_server.go @@ -0,0 +1,179 @@ +package server + +import ( + "context" + "database/sql" + + "github.com/golang/protobuf/ptypes/empty" + "github.com/izumin5210/grapi/pkg/grapiserver" + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + api_pb "github.com/ProgrammingLab/prolab-accounts/api" + "github.com/ProgrammingLab/prolab-accounts/app/di" + "github.com/ProgrammingLab/prolab-accounts/app/interceptor" + "github.com/ProgrammingLab/prolab-accounts/app/util" + "github.com/ProgrammingLab/prolab-accounts/infra/record" + "github.com/ProgrammingLab/prolab-accounts/model" +) + +// UserBlogServiceServer is a composite interface of api_pb.UserBlogServiceServer and grapiserver.Server. +type UserBlogServiceServer interface { + api_pb.UserBlogServiceServer + grapiserver.Server +} + +// NewUserBlogServiceServer creates a new UserBlogServiceServer instance. 
+func NewUserBlogServiceServer(store di.StoreComponent) UserBlogServiceServer { + return &userBlogServiceServerImpl{ + StoreComponent: store, + } +} + +type userBlogServiceServerImpl struct { + di.StoreComponent +} + +var ( + // ErrFeedURLDetectAutomatically returns when feed url could not be found automatically + ErrFeedURLDetectAutomatically = status.Error(codes.InvalidArgument, "feed url could not be found automatically") + // ErrInvalidFeedURL returns when feed url is invalid + ErrInvalidFeedURL = status.Error(codes.InvalidArgument, "feed url is invalid") +) + +func (s *userBlogServiceServerImpl) CreateUserBlog(ctx context.Context, req *api_pb.CreateUserBlogRequest) (*api_pb.Blog, error) { + userID, ok := interceptor.GetCurrentUserID(ctx) + if !ok { + return nil, util.ErrUnauthenticated + } + + blog := req.GetBlog() + feedURL, err := getFeedURL(ctx, s, req) + if err != nil { + return nil, err + } + + b := &record.Blog{ + URL: blog.GetUrl(), + FeedURL: feedURL, + UserID: int64(userID), + } + + bs := s.UserBlogStore(ctx) + err = bs.CreateUserBlog(b) + if err != nil { + return nil, err + } + + return blogToResponse(b), nil +} + +func (s *userBlogServiceServerImpl) UpdateUserBlog(ctx context.Context, req *api_pb.UpdateUserBlogRequest) (*api_pb.Blog, error) { + userID, ok := interceptor.GetCurrentUserID(ctx) + if !ok { + return nil, util.ErrUnauthenticated + } + + blog := req.GetBlog() + feedURL, err := getFeedURL(ctx, s, req) + if err != nil { + return nil, err + } + + b := &record.Blog{ + ID: int64(blog.GetBlogId()), + URL: blog.GetUrl(), + FeedURL: feedURL, + UserID: int64(userID), + } + + bs := s.UserBlogStore(ctx) + + if err := s.canWrite(ctx, userID, b.ID); err != nil { + return nil, err + } + + err = bs.UpdateUserBlog(b) + if err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, util.ErrNotFound + } + return nil, err + } + + return blogToResponse(b), nil +} + +func (s *userBlogServiceServerImpl) DeleteUserBlog(ctx context.Context, req 
*api_pb.DeleteUserBlogRequest) (*empty.Empty, error) { + userID, ok := interceptor.GetCurrentUserID(ctx) + if !ok { + return nil, util.ErrUnauthenticated + } + + blogID := int64(req.GetBlogId()) + bs := s.UserBlogStore(ctx) + + if err := s.canWrite(ctx, userID, blogID); err != nil { + return nil, err + } + + err := bs.DeleteUserBlog(blogID) + if err != nil { + return nil, err + } + + return &empty.Empty{}, nil +} + +func (s *userBlogServiceServerImpl) canWrite(ctx context.Context, userID model.UserID, blogID int64) error { + bs := s.UserBlogStore(ctx) + b, err := bs.GetUserBlog(int64(blogID)) + if err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return util.ErrNotFound + } + return err + } + if b.UserID != int64(userID) { + return util.ErrNotFound + } + return nil +} + +type blogRequest interface { + GetBlog() *api_pb.Blog + GetAutoDetectFeed() bool +} + +func getFeedURL(ctx context.Context, s di.StoreComponent, req blogRequest) (string, error) { + blog := req.GetBlog() + if req.GetAutoDetectFeed() { + fs := s.FeedStore(ctx) + u, err := fs.GetFeedURL(blog.GetUrl()) + if err != nil { + return "", ErrFeedURLDetectAutomatically + } + return u, nil + } + + u := blog.GetFeedUrl() + fs := s.FeedStore(ctx) + err := fs.IsValidFeedURL(u) + if err != nil { + return "", ErrInvalidFeedURL + } + return u, nil +} + +func blogToResponse(blog *record.Blog) *api_pb.Blog { + if blog == nil { + return nil + } + + return &api_pb.Blog{ + BlogId: uint32(blog.ID), + Url: blog.URL, + FeedUrl: blog.FeedURL, + } +} diff --git a/app/server/user_blogs_server_register_funcs.go b/app/server/user_blogs_server_register_funcs.go new file mode 100644 index 00000000..faa9d1db --- /dev/null +++ b/app/server/user_blogs_server_register_funcs.go @@ -0,0 +1,22 @@ +// Code generated by github.com/izumin5210/grapi. DO NOT EDIT. 
+ +package server + +import ( + "context" + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "google.golang.org/grpc" + + api_pb "github.com/ProgrammingLab/prolab-accounts/api" +) + +// RegisterWithServer implements grapiserver.Server.RegisterWithServer. +func (s *userBlogServiceServerImpl) RegisterWithServer(grpcSvr *grpc.Server) { + api_pb.RegisterUserBlogServiceServer(grpcSvr, s) +} + +// RegisterWithHandler implements grapiserver.Server.RegisterWithHandler. +func (s *userBlogServiceServerImpl) RegisterWithHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return api_pb.RegisterUserBlogServiceHandler(ctx, mux, conn) +} diff --git a/app/server/user_blogs_server_test.go b/app/server/user_blogs_server_test.go new file mode 100644 index 00000000..abb4e431 --- /dev/null +++ b/app/server/user_blogs_server_test.go @@ -0,0 +1 @@ +package server diff --git a/app/server/users_server.go b/app/server/users_server.go index 64e8bf4c..744e772c 100644 --- a/app/server/users_server.go +++ b/app/server/users_server.go @@ -186,6 +186,10 @@ func usersToResponse(users []*record.User, includeEmail bool, cfg *config.Config } func userToResponse(user *record.User, includeEmail bool, cfg *config.Config) *api_pb.User { + if user == nil { + return nil + } + var email string if includeEmail { email = user.Email diff --git a/app/util/errors.go b/app/util/errors.go index 8a67f4e0..3fefd69d 100644 --- a/app/util/errors.go +++ b/app/util/errors.go @@ -10,7 +10,9 @@ import ( var ( // ErrUnauthenticated represents unauthenticated error - ErrUnauthenticated = status.Error(codes.Unauthenticated, "Unauthenticated") + ErrUnauthenticated = status.Error(codes.Unauthenticated, "unauthenticated") + // ErrNotFound represents not found error + ErrNotFound = status.Error(codes.NotFound, "not found") ) // CodeFromHTTPStatus converts corresponding HTTP response status into the gRPC error code. 
diff --git a/db/Schemafile b/db/Schemafile index fc53f76c..87b57dbb 100644 --- a/db/Schemafile +++ b/db/Schemafile @@ -2,3 +2,5 @@ require 'users.schema' require 'profiles.schema' require 'roles.schema' require 'departments.schema' +require 'blogs.schema' +require 'entries.schema' diff --git a/db/blogs.schema b/db/blogs.schema new file mode 100644 index 00000000..9c7649f4 --- /dev/null +++ b/db/blogs.schema @@ -0,0 +1,10 @@ +create_table :blogs, force: :cascade do |t| + t.string :url, null: false + t.string :feed_url, null: false + t.references :user, foreign_key: true, null: false + t.timestamps + + t.index [:feed_url], unique: true +end + +add_foreign_key :blogs, :users diff --git a/db/entries.schema b/db/entries.schema new file mode 100644 index 00000000..d29595bc --- /dev/null +++ b/db/entries.schema @@ -0,0 +1,17 @@ +create_table :entries, force: :cascade do |t| + t.string :title, null: false + t.string :description, null: false + t.text :content, null: false + t.string :link, null: false + t.bigint :author_id, null: false + t.string :guid, null: false + t.string :image_url, null: false + t.references :blog, foreign_key: true, null: false + t.datetime :published_at, null: true + t.timestamps + + t.index [:guid], unique: true +end + +add_foreign_key :entries, :users, column: :author_id +add_foreign_key :entries, :blogs diff --git a/infra/record/blogs.go b/infra/record/blogs.go new file mode 100644 index 00000000..3ab866ee --- /dev/null +++ b/infra/record/blogs.go @@ -0,0 +1,1292 @@ +// Code generated by SQLBoiler (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. 
+ +package record + +import ( + "context" + "database/sql" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/volatiletech/sqlboiler/boil" + "github.com/volatiletech/sqlboiler/queries" + "github.com/volatiletech/sqlboiler/queries/qm" + "github.com/volatiletech/sqlboiler/queries/qmhelper" + "github.com/volatiletech/sqlboiler/strmangle" +) + +// Blog is an object representing the database table. +type Blog struct { + ID int64 `boil:"id" json:"id" toml:"id" yaml:"id"` + URL string `boil:"url" json:"url" toml:"url" yaml:"url"` + FeedURL string `boil:"feed_url" json:"feed_url" toml:"feed_url" yaml:"feed_url"` + UserID int64 `boil:"user_id" json:"user_id" toml:"user_id" yaml:"user_id"` + CreatedAt time.Time `boil:"created_at" json:"created_at" toml:"created_at" yaml:"created_at"` + UpdatedAt time.Time `boil:"updated_at" json:"updated_at" toml:"updated_at" yaml:"updated_at"` + + R *blogR `boil:"-" json:"-" toml:"-" yaml:"-"` + L blogL `boil:"-" json:"-" toml:"-" yaml:"-"` +} + +var BlogColumns = struct { + ID string + URL string + FeedURL string + UserID string + CreatedAt string + UpdatedAt string +}{ + ID: "id", + URL: "url", + FeedURL: "feed_url", + UserID: "user_id", + CreatedAt: "created_at", + UpdatedAt: "updated_at", +} + +// Generated where + +type whereHelperint64 struct{ field string } + +func (w whereHelperint64) EQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) } +func (w whereHelperint64) NEQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) } +func (w whereHelperint64) LT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) } +func (w whereHelperint64) LTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) } +func (w whereHelperint64) GT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) } +func (w whereHelperint64) GTE(x int64) qm.QueryMod { return 
qmhelper.Where(w.field, qmhelper.GTE, x) } + +type whereHelperstring struct{ field string } + +func (w whereHelperstring) EQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) } +func (w whereHelperstring) NEQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) } +func (w whereHelperstring) LT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) } +func (w whereHelperstring) LTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) } +func (w whereHelperstring) GT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) } +func (w whereHelperstring) GTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) } + +type whereHelpertime_Time struct{ field string } + +func (w whereHelpertime_Time) EQ(x time.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.EQ, x) +} +func (w whereHelpertime_Time) NEQ(x time.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.NEQ, x) +} +func (w whereHelpertime_Time) LT(x time.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.LT, x) +} +func (w whereHelpertime_Time) LTE(x time.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.LTE, x) +} +func (w whereHelpertime_Time) GT(x time.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.GT, x) +} +func (w whereHelpertime_Time) GTE(x time.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.GTE, x) +} + +var BlogWhere = struct { + ID whereHelperint64 + URL whereHelperstring + FeedURL whereHelperstring + UserID whereHelperint64 + CreatedAt whereHelpertime_Time + UpdatedAt whereHelpertime_Time +}{ + ID: whereHelperint64{field: `id`}, + URL: whereHelperstring{field: `url`}, + FeedURL: whereHelperstring{field: `feed_url`}, + UserID: whereHelperint64{field: `user_id`}, + CreatedAt: whereHelpertime_Time{field: `created_at`}, + UpdatedAt: whereHelpertime_Time{field: `updated_at`}, +} + +// BlogRels is where relationship names are 
stored. +var BlogRels = struct { + User string + Entries string +}{ + User: "User", + Entries: "Entries", +} + +// blogR is where relationships are stored. +type blogR struct { + User *User + Entries EntrySlice +} + +// NewStruct creates a new relationship struct +func (*blogR) NewStruct() *blogR { + return &blogR{} +} + +// blogL is where Load methods for each relationship are stored. +type blogL struct{} + +var ( + blogColumns = []string{"id", "url", "feed_url", "user_id", "created_at", "updated_at"} + blogColumnsWithoutDefault = []string{"url", "feed_url", "user_id", "created_at", "updated_at"} + blogColumnsWithDefault = []string{"id"} + blogPrimaryKeyColumns = []string{"id"} +) + +type ( + // BlogSlice is an alias for a slice of pointers to Blog. + // This should generally be used opposed to []Blog. + BlogSlice []*Blog + // BlogHook is the signature for custom Blog hook methods + BlogHook func(context.Context, boil.ContextExecutor, *Blog) error + + blogQuery struct { + *queries.Query + } +) + +// Cache for insert, update and upsert +var ( + blogType = reflect.TypeOf(&Blog{}) + blogMapping = queries.MakeStructMapping(blogType) + blogPrimaryKeyMapping, _ = queries.BindMapping(blogType, blogMapping, blogPrimaryKeyColumns) + blogInsertCacheMut sync.RWMutex + blogInsertCache = make(map[string]insertCache) + blogUpdateCacheMut sync.RWMutex + blogUpdateCache = make(map[string]updateCache) + blogUpsertCacheMut sync.RWMutex + blogUpsertCache = make(map[string]insertCache) +) + +var ( + // Force time package dependency for automated UpdatedAt/CreatedAt. 
+ _ = time.Second + // Force qmhelper dependency for where clause generation (which doesn't + // always happen) + _ = qmhelper.Where +) + +var blogBeforeInsertHooks []BlogHook +var blogBeforeUpdateHooks []BlogHook +var blogBeforeDeleteHooks []BlogHook +var blogBeforeUpsertHooks []BlogHook + +var blogAfterInsertHooks []BlogHook +var blogAfterSelectHooks []BlogHook +var blogAfterUpdateHooks []BlogHook +var blogAfterDeleteHooks []BlogHook +var blogAfterUpsertHooks []BlogHook + +// doBeforeInsertHooks executes all "before insert" hooks. +func (o *Blog) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range blogBeforeInsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doBeforeUpdateHooks executes all "before Update" hooks. +func (o *Blog) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range blogBeforeUpdateHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doBeforeDeleteHooks executes all "before Delete" hooks. +func (o *Blog) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range blogBeforeDeleteHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doBeforeUpsertHooks executes all "before Upsert" hooks. +func (o *Blog) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range blogBeforeUpsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterInsertHooks executes all "after Insert" hooks. 
+func (o *Blog) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range blogAfterInsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterSelectHooks executes all "after Select" hooks. +func (o *Blog) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range blogAfterSelectHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterUpdateHooks executes all "after Update" hooks. +func (o *Blog) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range blogAfterUpdateHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterDeleteHooks executes all "after Delete" hooks. +func (o *Blog) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range blogAfterDeleteHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterUpsertHooks executes all "after Upsert" hooks. +func (o *Blog) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range blogAfterUpsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// AddBlogHook registers your hook function for all future operations. 
+func AddBlogHook(hookPoint boil.HookPoint, blogHook BlogHook) { + switch hookPoint { + case boil.BeforeInsertHook: + blogBeforeInsertHooks = append(blogBeforeInsertHooks, blogHook) + case boil.BeforeUpdateHook: + blogBeforeUpdateHooks = append(blogBeforeUpdateHooks, blogHook) + case boil.BeforeDeleteHook: + blogBeforeDeleteHooks = append(blogBeforeDeleteHooks, blogHook) + case boil.BeforeUpsertHook: + blogBeforeUpsertHooks = append(blogBeforeUpsertHooks, blogHook) + case boil.AfterInsertHook: + blogAfterInsertHooks = append(blogAfterInsertHooks, blogHook) + case boil.AfterSelectHook: + blogAfterSelectHooks = append(blogAfterSelectHooks, blogHook) + case boil.AfterUpdateHook: + blogAfterUpdateHooks = append(blogAfterUpdateHooks, blogHook) + case boil.AfterDeleteHook: + blogAfterDeleteHooks = append(blogAfterDeleteHooks, blogHook) + case boil.AfterUpsertHook: + blogAfterUpsertHooks = append(blogAfterUpsertHooks, blogHook) + } +} + +// One returns a single blog record from the query. +func (q blogQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Blog, error) { + o := &Blog{} + + queries.SetLimit(q.Query, 1) + + err := q.Bind(ctx, exec, o) + if err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, sql.ErrNoRows + } + return nil, errors.Wrap(err, "record: failed to execute a one query for blogs") + } + + if err := o.doAfterSelectHooks(ctx, exec); err != nil { + return o, err + } + + return o, nil +} + +// All returns all Blog records from the query. +func (q blogQuery) All(ctx context.Context, exec boil.ContextExecutor) (BlogSlice, error) { + var o []*Blog + + err := q.Bind(ctx, exec, &o) + if err != nil { + return nil, errors.Wrap(err, "record: failed to assign all query results to Blog slice") + } + + if len(blogAfterSelectHooks) != 0 { + for _, obj := range o { + if err := obj.doAfterSelectHooks(ctx, exec); err != nil { + return o, err + } + } + } + + return o, nil +} + +// Count returns the count of all Blog records in the query. 
+func (q blogQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + var count int64 + + queries.SetSelect(q.Query, nil) + queries.SetCount(q.Query) + + err := q.Query.QueryRowContext(ctx, exec).Scan(&count) + if err != nil { + return 0, errors.Wrap(err, "record: failed to count blogs rows") + } + + return count, nil +} + +// Exists checks if the row exists in the table. +func (q blogQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) { + var count int64 + + queries.SetSelect(q.Query, nil) + queries.SetCount(q.Query) + queries.SetLimit(q.Query, 1) + + err := q.Query.QueryRowContext(ctx, exec).Scan(&count) + if err != nil { + return false, errors.Wrap(err, "record: failed to check if blogs exists") + } + + return count > 0, nil +} + +// User pointed to by the foreign key. +func (o *Blog) User(mods ...qm.QueryMod) userQuery { + queryMods := []qm.QueryMod{ + qm.Where("id=?", o.UserID), + } + + queryMods = append(queryMods, mods...) + + query := Users(queryMods...) + queries.SetFrom(query.Query, "\"users\"") + + return query +} + +// Entries retrieves all the entry's Entries with an executor. +func (o *Blog) Entries(mods ...qm.QueryMod) entryQuery { + var queryMods []qm.QueryMod + if len(mods) != 0 { + queryMods = append(queryMods, mods...) + } + + queryMods = append(queryMods, + qm.Where("\"entries\".\"blog_id\"=?", o.ID), + ) + + query := Entries(queryMods...) + queries.SetFrom(query.Query, "\"entries\"") + + if len(queries.GetSelect(query.Query)) == 0 { + queries.SetSelect(query.Query, []string{"\"entries\".*"}) + } + + return query +} + +// LoadUser allows an eager lookup of values, cached into the +// loaded structs of the objects. This is for an N-1 relationship. 
+func (blogL) LoadUser(ctx context.Context, e boil.ContextExecutor, singular bool, maybeBlog interface{}, mods queries.Applicator) error { + var slice []*Blog + var object *Blog + + if singular { + object = maybeBlog.(*Blog) + } else { + slice = *maybeBlog.(*[]*Blog) + } + + args := make([]interface{}, 0, 1) + if singular { + if object.R == nil { + object.R = &blogR{} + } + args = append(args, object.UserID) + + } else { + Outer: + for _, obj := range slice { + if obj.R == nil { + obj.R = &blogR{} + } + + for _, a := range args { + if a == obj.UserID { + continue Outer + } + } + + args = append(args, obj.UserID) + + } + } + + if len(args) == 0 { + return nil + } + + query := NewQuery(qm.From(`users`), qm.WhereIn(`id in ?`, args...)) + if mods != nil { + mods.Apply(query) + } + + results, err := query.QueryContext(ctx, e) + if err != nil { + return errors.Wrap(err, "failed to eager load User") + } + + var resultSlice []*User + if err = queries.Bind(results, &resultSlice); err != nil { + return errors.Wrap(err, "failed to bind eager loaded slice User") + } + + if err = results.Close(); err != nil { + return errors.Wrap(err, "failed to close results of eager load for users") + } + if err = results.Err(); err != nil { + return errors.Wrap(err, "error occurred during iteration of eager loaded relations for users") + } + + if len(blogAfterSelectHooks) != 0 { + for _, obj := range resultSlice { + if err := obj.doAfterSelectHooks(ctx, e); err != nil { + return err + } + } + } + + if len(resultSlice) == 0 { + return nil + } + + if singular { + foreign := resultSlice[0] + object.R.User = foreign + if foreign.R == nil { + foreign.R = &userR{} + } + foreign.R.Blogs = append(foreign.R.Blogs, object) + return nil + } + + for _, local := range slice { + for _, foreign := range resultSlice { + if local.UserID == foreign.ID { + local.R.User = foreign + if foreign.R == nil { + foreign.R = &userR{} + } + foreign.R.Blogs = append(foreign.R.Blogs, local) + break + } + } + } + + return 
nil +} + +// LoadEntries allows an eager lookup of values, cached into the +// loaded structs of the objects. This is for a 1-M or N-M relationship. +func (blogL) LoadEntries(ctx context.Context, e boil.ContextExecutor, singular bool, maybeBlog interface{}, mods queries.Applicator) error { + var slice []*Blog + var object *Blog + + if singular { + object = maybeBlog.(*Blog) + } else { + slice = *maybeBlog.(*[]*Blog) + } + + args := make([]interface{}, 0, 1) + if singular { + if object.R == nil { + object.R = &blogR{} + } + args = append(args, object.ID) + } else { + Outer: + for _, obj := range slice { + if obj.R == nil { + obj.R = &blogR{} + } + + for _, a := range args { + if a == obj.ID { + continue Outer + } + } + + args = append(args, obj.ID) + } + } + + if len(args) == 0 { + return nil + } + + query := NewQuery(qm.From(`entries`), qm.WhereIn(`blog_id in ?`, args...)) + if mods != nil { + mods.Apply(query) + } + + results, err := query.QueryContext(ctx, e) + if err != nil { + return errors.Wrap(err, "failed to eager load entries") + } + + var resultSlice []*Entry + if err = queries.Bind(results, &resultSlice); err != nil { + return errors.Wrap(err, "failed to bind eager loaded slice entries") + } + + if err = results.Close(); err != nil { + return errors.Wrap(err, "failed to close results in eager load on entries") + } + if err = results.Err(); err != nil { + return errors.Wrap(err, "error occurred during iteration of eager loaded relations for entries") + } + + if len(entryAfterSelectHooks) != 0 { + for _, obj := range resultSlice { + if err := obj.doAfterSelectHooks(ctx, e); err != nil { + return err + } + } + } + if singular { + object.R.Entries = resultSlice + for _, foreign := range resultSlice { + if foreign.R == nil { + foreign.R = &entryR{} + } + foreign.R.Blog = object + } + return nil + } + + for _, foreign := range resultSlice { + for _, local := range slice { + if local.ID == foreign.BlogID { + local.R.Entries = append(local.R.Entries, foreign) + 
if foreign.R == nil { + foreign.R = &entryR{} + } + foreign.R.Blog = local + break + } + } + } + + return nil +} + +// SetUser of the blog to the related item. +// Sets o.R.User to related. +// Adds o to related.R.Blogs. +func (o *Blog) SetUser(ctx context.Context, exec boil.ContextExecutor, insert bool, related *User) error { + var err error + if insert { + if err = related.Insert(ctx, exec, boil.Infer()); err != nil { + return errors.Wrap(err, "failed to insert into foreign table") + } + } + + updateQuery := fmt.Sprintf( + "UPDATE \"blogs\" SET %s WHERE %s", + strmangle.SetParamNames("\"", "\"", 1, []string{"user_id"}), + strmangle.WhereClause("\"", "\"", 2, blogPrimaryKeyColumns), + ) + values := []interface{}{related.ID, o.ID} + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, updateQuery) + fmt.Fprintln(boil.DebugWriter, values) + } + + if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil { + return errors.Wrap(err, "failed to update local table") + } + + o.UserID = related.ID + if o.R == nil { + o.R = &blogR{ + User: related, + } + } else { + o.R.User = related + } + + if related.R == nil { + related.R = &userR{ + Blogs: BlogSlice{o}, + } + } else { + related.R.Blogs = append(related.R.Blogs, o) + } + + return nil +} + +// AddEntries adds the given related objects to the existing relationships +// of the blog, optionally inserting them as new records. +// Appends related to o.R.Entries. +// Sets related.R.Blog appropriately. 
+func (o *Blog) AddEntries(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Entry) error { + var err error + for _, rel := range related { + if insert { + rel.BlogID = o.ID + if err = rel.Insert(ctx, exec, boil.Infer()); err != nil { + return errors.Wrap(err, "failed to insert into foreign table") + } + } else { + updateQuery := fmt.Sprintf( + "UPDATE \"entries\" SET %s WHERE %s", + strmangle.SetParamNames("\"", "\"", 1, []string{"blog_id"}), + strmangle.WhereClause("\"", "\"", 2, entryPrimaryKeyColumns), + ) + values := []interface{}{o.ID, rel.ID} + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, updateQuery) + fmt.Fprintln(boil.DebugWriter, values) + } + + if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil { + return errors.Wrap(err, "failed to update foreign table") + } + + rel.BlogID = o.ID + } + } + + if o.R == nil { + o.R = &blogR{ + Entries: related, + } + } else { + o.R.Entries = append(o.R.Entries, related...) + } + + for _, rel := range related { + if rel.R == nil { + rel.R = &entryR{ + Blog: o, + } + } else { + rel.R.Blog = o + } + } + return nil +} + +// Blogs retrieves all the records using an executor. +func Blogs(mods ...qm.QueryMod) blogQuery { + mods = append(mods, qm.From("\"blogs\"")) + return blogQuery{NewQuery(mods...)} +} + +// FindBlog retrieves a single record by ID with an executor. +// If selectCols is empty Find will return all columns. 
+func FindBlog(ctx context.Context, exec boil.ContextExecutor, iD int64, selectCols ...string) (*Blog, error) { + blogObj := &Blog{} + + sel := "*" + if len(selectCols) > 0 { + sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",") + } + query := fmt.Sprintf( + "select %s from \"blogs\" where \"id\"=$1", sel, + ) + + q := queries.Raw(query, iD) + + err := q.Bind(ctx, exec, blogObj) + if err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, sql.ErrNoRows + } + return nil, errors.Wrap(err, "record: unable to select from blogs") + } + + return blogObj, nil +} + +// Insert a single record using an executor. +// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts. +func (o *Blog) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error { + if o == nil { + return errors.New("record: no blogs provided for insertion") + } + + var err error + if !boil.TimestampsAreSkipped(ctx) { + currTime := time.Now().In(boil.GetLocation()) + + if o.CreatedAt.IsZero() { + o.CreatedAt = currTime + } + if o.UpdatedAt.IsZero() { + o.UpdatedAt = currTime + } + } + + if err := o.doBeforeInsertHooks(ctx, exec); err != nil { + return err + } + + nzDefaults := queries.NonZeroDefaultSet(blogColumnsWithDefault, o) + + key := makeCacheKey(columns, nzDefaults) + blogInsertCacheMut.RLock() + cache, cached := blogInsertCache[key] + blogInsertCacheMut.RUnlock() + + if !cached { + wl, returnColumns := columns.InsertColumnSet( + blogColumns, + blogColumnsWithDefault, + blogColumnsWithoutDefault, + nzDefaults, + ) + + cache.valueMapping, err = queries.BindMapping(blogType, blogMapping, wl) + if err != nil { + return err + } + cache.retMapping, err = queries.BindMapping(blogType, blogMapping, returnColumns) + if err != nil { + return err + } + if len(wl) != 0 { + cache.query = fmt.Sprintf("INSERT INTO \"blogs\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), 
strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1)) + } else { + cache.query = "INSERT INTO \"blogs\" %sDEFAULT VALUES%s" + } + + var queryOutput, queryReturning string + + if len(cache.retMapping) != 0 { + queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\"")) + } + + cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning) + } + + value := reflect.Indirect(reflect.ValueOf(o)) + vals := queries.ValuesFromMapping(value, cache.valueMapping) + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, cache.query) + fmt.Fprintln(boil.DebugWriter, vals) + } + + if len(cache.retMapping) != 0 { + err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...) + } else { + _, err = exec.ExecContext(ctx, cache.query, vals...) + } + + if err != nil { + return errors.Wrap(err, "record: unable to insert into blogs") + } + + if !cached { + blogInsertCacheMut.Lock() + blogInsertCache[key] = cache + blogInsertCacheMut.Unlock() + } + + return o.doAfterInsertHooks(ctx, exec) +} + +// Update uses an executor to update the Blog. +// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates. +// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records. 
+func (o *Blog) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) { + if !boil.TimestampsAreSkipped(ctx) { + currTime := time.Now().In(boil.GetLocation()) + + o.UpdatedAt = currTime + } + + var err error + if err = o.doBeforeUpdateHooks(ctx, exec); err != nil { + return 0, err + } + key := makeCacheKey(columns, nil) + blogUpdateCacheMut.RLock() + cache, cached := blogUpdateCache[key] + blogUpdateCacheMut.RUnlock() + + if !cached { + wl := columns.UpdateColumnSet( + blogColumns, + blogPrimaryKeyColumns, + ) + + if !columns.IsWhitelist() { + wl = strmangle.SetComplement(wl, []string{"created_at"}) + } + if len(wl) == 0 { + return 0, errors.New("record: unable to update blogs, could not build whitelist") + } + + cache.query = fmt.Sprintf("UPDATE \"blogs\" SET %s WHERE %s", + strmangle.SetParamNames("\"", "\"", 1, wl), + strmangle.WhereClause("\"", "\"", len(wl)+1, blogPrimaryKeyColumns), + ) + cache.valueMapping, err = queries.BindMapping(blogType, blogMapping, append(wl, blogPrimaryKeyColumns...)) + if err != nil { + return 0, err + } + } + + values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping) + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, cache.query) + fmt.Fprintln(boil.DebugWriter, values) + } + + var result sql.Result + result, err = exec.ExecContext(ctx, cache.query, values...) + if err != nil { + return 0, errors.Wrap(err, "record: unable to update blogs row") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "record: failed to get rows affected by update for blogs") + } + + if !cached { + blogUpdateCacheMut.Lock() + blogUpdateCache[key] = cache + blogUpdateCacheMut.Unlock() + } + + return rowsAff, o.doAfterUpdateHooks(ctx, exec) +} + +// UpdateAll updates all rows with the specified column values. 
+func (q blogQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { + queries.SetUpdate(q.Query, cols) + + result, err := q.Query.ExecContext(ctx, exec) + if err != nil { + return 0, errors.Wrap(err, "record: unable to update all for blogs") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "record: unable to retrieve rows affected for blogs") + } + + return rowsAff, nil +} + +// UpdateAll updates all rows with the specified column values, using an executor. +func (o BlogSlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { + ln := int64(len(o)) + if ln == 0 { + return 0, nil + } + + if len(cols) == 0 { + return 0, errors.New("record: update all requires at least one column argument") + } + + colNames := make([]string, len(cols)) + args := make([]interface{}, len(cols)) + + i := 0 + for name, value := range cols { + colNames[i] = name + args[i] = value + i++ + } + + // Append all of the primary key values for each column + for _, obj := range o { + pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), blogPrimaryKeyMapping) + args = append(args, pkeyArgs...) + } + + sql := fmt.Sprintf("UPDATE \"blogs\" SET %s WHERE %s", + strmangle.SetParamNames("\"", "\"", 1, colNames), + strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, blogPrimaryKeyColumns, len(o))) + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, sql) + fmt.Fprintln(boil.DebugWriter, args...) + } + + result, err := exec.ExecContext(ctx, sql, args...) + if err != nil { + return 0, errors.Wrap(err, "record: unable to update all in blog slice") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "record: unable to retrieve rows affected all in update all blog") + } + return rowsAff, nil +} + +// Upsert attempts an insert using an executor, and does an update or ignore on conflict. 
+// See boil.Columns documentation for how to properly use updateColumns and insertColumns. +func (o *Blog) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error { + if o == nil { + return errors.New("record: no blogs provided for upsert") + } + if !boil.TimestampsAreSkipped(ctx) { + currTime := time.Now().In(boil.GetLocation()) + + if o.CreatedAt.IsZero() { + o.CreatedAt = currTime + } + o.UpdatedAt = currTime + } + + if err := o.doBeforeUpsertHooks(ctx, exec); err != nil { + return err + } + + nzDefaults := queries.NonZeroDefaultSet(blogColumnsWithDefault, o) + + // Build cache key in-line uglily - mysql vs psql problems + buf := strmangle.GetBuffer() + if updateOnConflict { + buf.WriteByte('t') + } else { + buf.WriteByte('f') + } + buf.WriteByte('.') + for _, c := range conflictColumns { + buf.WriteString(c) + } + buf.WriteByte('.') + buf.WriteString(strconv.Itoa(updateColumns.Kind)) + for _, c := range updateColumns.Cols { + buf.WriteString(c) + } + buf.WriteByte('.') + buf.WriteString(strconv.Itoa(insertColumns.Kind)) + for _, c := range insertColumns.Cols { + buf.WriteString(c) + } + buf.WriteByte('.') + for _, c := range nzDefaults { + buf.WriteString(c) + } + key := buf.String() + strmangle.PutBuffer(buf) + + blogUpsertCacheMut.RLock() + cache, cached := blogUpsertCache[key] + blogUpsertCacheMut.RUnlock() + + var err error + + if !cached { + insert, ret := insertColumns.InsertColumnSet( + blogColumns, + blogColumnsWithDefault, + blogColumnsWithoutDefault, + nzDefaults, + ) + update := updateColumns.UpdateColumnSet( + blogColumns, + blogPrimaryKeyColumns, + ) + + if updateOnConflict && len(update) == 0 { + return errors.New("record: unable to upsert blogs, could not build update column list") + } + + conflict := conflictColumns + if len(conflict) == 0 { + conflict = make([]string, len(blogPrimaryKeyColumns)) + copy(conflict, blogPrimaryKeyColumns) + } + 
cache.query = buildUpsertQueryPostgres(dialect, "\"blogs\"", updateOnConflict, ret, update, conflict, insert) + + cache.valueMapping, err = queries.BindMapping(blogType, blogMapping, insert) + if err != nil { + return err + } + if len(ret) != 0 { + cache.retMapping, err = queries.BindMapping(blogType, blogMapping, ret) + if err != nil { + return err + } + } + } + + value := reflect.Indirect(reflect.ValueOf(o)) + vals := queries.ValuesFromMapping(value, cache.valueMapping) + var returns []interface{} + if len(cache.retMapping) != 0 { + returns = queries.PtrsFromMapping(value, cache.retMapping) + } + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, cache.query) + fmt.Fprintln(boil.DebugWriter, vals) + } + + if len(cache.retMapping) != 0 { + err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...) + if err == sql.ErrNoRows { + err = nil // Postgres doesn't return anything when there's no update + } + } else { + _, err = exec.ExecContext(ctx, cache.query, vals...) + } + if err != nil { + return errors.Wrap(err, "record: unable to upsert blogs") + } + + if !cached { + blogUpsertCacheMut.Lock() + blogUpsertCache[key] = cache + blogUpsertCacheMut.Unlock() + } + + return o.doAfterUpsertHooks(ctx, exec) +} + +// Delete deletes a single Blog record with an executor. +// Delete will match against the primary key column to find the record to delete. +func (o *Blog) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + if o == nil { + return 0, errors.New("record: no Blog provided for delete") + } + + if err := o.doBeforeDeleteHooks(ctx, exec); err != nil { + return 0, err + } + + args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), blogPrimaryKeyMapping) + sql := "DELETE FROM \"blogs\" WHERE \"id\"=$1" + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, sql) + fmt.Fprintln(boil.DebugWriter, args...) + } + + result, err := exec.ExecContext(ctx, sql, args...) 
+ if err != nil { + return 0, errors.Wrap(err, "record: unable to delete from blogs") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "record: failed to get rows affected by delete for blogs") + } + + if err := o.doAfterDeleteHooks(ctx, exec); err != nil { + return 0, err + } + + return rowsAff, nil +} + +// DeleteAll deletes all matching rows. +func (q blogQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + if q.Query == nil { + return 0, errors.New("record: no blogQuery provided for delete all") + } + + queries.SetDelete(q.Query) + + result, err := q.Query.ExecContext(ctx, exec) + if err != nil { + return 0, errors.Wrap(err, "record: unable to delete all from blogs") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "record: failed to get rows affected by deleteall for blogs") + } + + return rowsAff, nil +} + +// DeleteAll deletes all rows in the slice, using an executor. +func (o BlogSlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + if o == nil { + return 0, errors.New("record: no Blog slice provided for delete all") + } + + if len(o) == 0 { + return 0, nil + } + + if len(blogBeforeDeleteHooks) != 0 { + for _, obj := range o { + if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil { + return 0, err + } + } + } + + var args []interface{} + for _, obj := range o { + pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), blogPrimaryKeyMapping) + args = append(args, pkeyArgs...) + } + + sql := "DELETE FROM \"blogs\" WHERE " + + strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, blogPrimaryKeyColumns, len(o)) + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, sql) + fmt.Fprintln(boil.DebugWriter, args) + } + + result, err := exec.ExecContext(ctx, sql, args...) 
+ if err != nil { + return 0, errors.Wrap(err, "record: unable to delete all from blog slice") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "record: failed to get rows affected by deleteall for blogs") + } + + if len(blogAfterDeleteHooks) != 0 { + for _, obj := range o { + if err := obj.doAfterDeleteHooks(ctx, exec); err != nil { + return 0, err + } + } + } + + return rowsAff, nil +} + +// Reload refetches the object from the database +// using the primary keys with an executor. +func (o *Blog) Reload(ctx context.Context, exec boil.ContextExecutor) error { + ret, err := FindBlog(ctx, exec, o.ID) + if err != nil { + return err + } + + *o = *ret + return nil +} + +// ReloadAll refetches every row with matching primary key column values +// and overwrites the original object slice with the newly updated slice. +func (o *BlogSlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error { + if o == nil || len(*o) == 0 { + return nil + } + + slice := BlogSlice{} + var args []interface{} + for _, obj := range *o { + pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), blogPrimaryKeyMapping) + args = append(args, pkeyArgs...) + } + + sql := "SELECT \"blogs\".* FROM \"blogs\" WHERE " + + strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, blogPrimaryKeyColumns, len(*o)) + + q := queries.Raw(sql, args...) + + err := q.Bind(ctx, exec, &slice) + if err != nil { + return errors.Wrap(err, "record: unable to reload all in BlogSlice") + } + + *o = slice + + return nil +} + +// BlogExists checks if the Blog row exists. 
+func BlogExists(ctx context.Context, exec boil.ContextExecutor, iD int64) (bool, error) { + var exists bool + sql := "select exists(select 1 from \"blogs\" where \"id\"=$1 limit 1)" + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, sql) + fmt.Fprintln(boil.DebugWriter, iD) + } + + row := exec.QueryRowContext(ctx, sql, iD) + + err := row.Scan(&exists) + if err != nil { + return false, errors.Wrap(err, "record: unable to check if blogs exists") + } + + return exists, nil +} diff --git a/infra/record/blogs_test.go b/infra/record/blogs_test.go new file mode 100644 index 00000000..a1344f90 --- /dev/null +++ b/infra/record/blogs_test.go @@ -0,0 +1,994 @@ +// Code generated by SQLBoiler (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package record + +import ( + "bytes" + "context" + "reflect" + "testing" + + "github.com/volatiletech/sqlboiler/boil" + "github.com/volatiletech/sqlboiler/queries" + "github.com/volatiletech/sqlboiler/randomize" + "github.com/volatiletech/sqlboiler/strmangle" +) + +var ( + // Relationships sometimes use the reflection helper queries.Equal/queries.Assign + // so force a package dependency in case they don't. 
+ _ = queries.Equal +) + +func testBlogs(t *testing.T) { + t.Parallel() + + query := Blogs() + + if query.Query == nil { + t.Error("expected a query, got nothing") + } +} + +func testBlogsDelete(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Blog{} + if err = randomize.Struct(seed, o, blogDBTypes, true, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if rowsAff, err := o.Delete(ctx, tx); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only have deleted one row, but affected:", rowsAff) + } + + count, err := Blogs().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 0 { + t.Error("want zero records, got:", count) + } +} + +func testBlogsQueryDeleteAll(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Blog{} + if err = randomize.Struct(seed, o, blogDBTypes, true, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if rowsAff, err := Blogs().DeleteAll(ctx, tx); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only have deleted one row, but affected:", rowsAff) + } + + count, err := Blogs().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 0 { + t.Error("want zero records, got:", count) + } +} + +func testBlogsSliceDeleteAll(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Blog{} + if err = randomize.Struct(seed, o, blogDBTypes, true, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to 
randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice := BlogSlice{o} + + if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only have deleted one row, but affected:", rowsAff) + } + + count, err := Blogs().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 0 { + t.Error("want zero records, got:", count) + } +} + +func testBlogsExists(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Blog{} + if err = randomize.Struct(seed, o, blogDBTypes, true, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + e, err := BlogExists(ctx, tx, o.ID) + if err != nil { + t.Errorf("Unable to check if Blog exists: %s", err) + } + if !e { + t.Errorf("Expected BlogExists to return true, but got false.") + } +} + +func testBlogsFind(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Blog{} + if err = randomize.Struct(seed, o, blogDBTypes, true, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + blogFound, err := FindBlog(ctx, tx, o.ID) + if err != nil { + t.Error(err) + } + + if blogFound == nil { + t.Error("want a record, got nil") + } +} + +func testBlogsBind(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Blog{} + if err = randomize.Struct(seed, o, 
blogDBTypes, true, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if err = Blogs().Bind(ctx, tx, o); err != nil { + t.Error(err) + } +} + +func testBlogsOne(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Blog{} + if err = randomize.Struct(seed, o, blogDBTypes, true, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if x, err := Blogs().One(ctx, tx); err != nil { + t.Error(err) + } else if x == nil { + t.Error("expected to get a non nil record") + } +} + +func testBlogsAll(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + blogOne := &Blog{} + blogTwo := &Blog{} + if err = randomize.Struct(seed, blogOne, blogDBTypes, false, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + if err = randomize.Struct(seed, blogTwo, blogDBTypes, false, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = blogOne.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + if err = blogTwo.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice, err := Blogs().All(ctx, tx) + if err != nil { + t.Error(err) + } + + if len(slice) != 2 { + t.Error("want 2 records, got:", len(slice)) + } +} + +func testBlogsCount(t *testing.T) { + t.Parallel() + + var err error + seed := randomize.NewSeed() + blogOne := &Blog{} + 
blogTwo := &Blog{} + if err = randomize.Struct(seed, blogOne, blogDBTypes, false, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + if err = randomize.Struct(seed, blogTwo, blogDBTypes, false, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = blogOne.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + if err = blogTwo.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := Blogs().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 2 { + t.Error("want 2 records, got:", count) + } +} + +func blogBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Blog) error { + *o = Blog{} + return nil +} + +func blogAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Blog) error { + *o = Blog{} + return nil +} + +func blogAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Blog) error { + *o = Blog{} + return nil +} + +func blogBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Blog) error { + *o = Blog{} + return nil +} + +func blogAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Blog) error { + *o = Blog{} + return nil +} + +func blogBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Blog) error { + *o = Blog{} + return nil +} + +func blogAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Blog) error { + *o = Blog{} + return nil +} + +func blogBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Blog) error { + *o = Blog{} + return nil +} + +func blogAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Blog) error { + *o = Blog{} + return nil +} + +func testBlogsHooks(t *testing.T) { + t.Parallel() + + var err error + + ctx := context.Background() + empty := &Blog{} + o := &Blog{} + + seed := 
randomize.NewSeed() + if err = randomize.Struct(seed, o, blogDBTypes, false); err != nil { + t.Errorf("Unable to randomize Blog object: %s", err) + } + + AddBlogHook(boil.BeforeInsertHook, blogBeforeInsertHook) + if err = o.doBeforeInsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doBeforeInsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o) + } + blogBeforeInsertHooks = []BlogHook{} + + AddBlogHook(boil.AfterInsertHook, blogAfterInsertHook) + if err = o.doAfterInsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterInsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o) + } + blogAfterInsertHooks = []BlogHook{} + + AddBlogHook(boil.AfterSelectHook, blogAfterSelectHook) + if err = o.doAfterSelectHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterSelectHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o) + } + blogAfterSelectHooks = []BlogHook{} + + AddBlogHook(boil.BeforeUpdateHook, blogBeforeUpdateHook) + if err = o.doBeforeUpdateHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o) + } + blogBeforeUpdateHooks = []BlogHook{} + + AddBlogHook(boil.AfterUpdateHook, blogAfterUpdateHook) + if err = o.doAfterUpdateHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterUpdateHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o) + } + blogAfterUpdateHooks = []BlogHook{} + + AddBlogHook(boil.BeforeDeleteHook, blogBeforeDeleteHook) + if err = o.doBeforeDeleteHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute 
doBeforeDeleteHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o) + } + blogBeforeDeleteHooks = []BlogHook{} + + AddBlogHook(boil.AfterDeleteHook, blogAfterDeleteHook) + if err = o.doAfterDeleteHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterDeleteHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o) + } + blogAfterDeleteHooks = []BlogHook{} + + AddBlogHook(boil.BeforeUpsertHook, blogBeforeUpsertHook) + if err = o.doBeforeUpsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o) + } + blogBeforeUpsertHooks = []BlogHook{} + + AddBlogHook(boil.AfterUpsertHook, blogAfterUpsertHook) + if err = o.doAfterUpsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterUpsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o) + } + blogAfterUpsertHooks = []BlogHook{} +} + +func testBlogsInsert(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Blog{} + if err = randomize.Struct(seed, o, blogDBTypes, true, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := Blogs().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + t.Error("want one record, got:", count) + } +} + +func testBlogsInsertWhitelist(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Blog{} + if err = randomize.Struct(seed, o, 
blogDBTypes, true); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Whitelist(blogColumnsWithoutDefault...)); err != nil { + t.Error(err) + } + + count, err := Blogs().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + t.Error("want one record, got:", count) + } +} + +func testBlogToManyEntries(t *testing.T) { + var err error + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + + var a Blog + var b, c Entry + + seed := randomize.NewSeed() + if err = randomize.Struct(seed, &a, blogDBTypes, true, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + if err := a.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + if err = randomize.Struct(seed, &b, entryDBTypes, false, entryColumnsWithDefault...); err != nil { + t.Fatal(err) + } + if err = randomize.Struct(seed, &c, entryDBTypes, false, entryColumnsWithDefault...); err != nil { + t.Fatal(err) + } + + b.BlogID = a.ID + c.BlogID = a.ID + + if err = b.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + if err = c.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + check, err := a.Entries().All(ctx, tx) + if err != nil { + t.Fatal(err) + } + + bFound, cFound := false, false + for _, v := range check { + if v.BlogID == b.BlogID { + bFound = true + } + if v.BlogID == c.BlogID { + cFound = true + } + } + + if !bFound { + t.Error("expected to find b") + } + if !cFound { + t.Error("expected to find c") + } + + slice := BlogSlice{&a} + if err = a.L.LoadEntries(ctx, tx, false, (*[]*Blog)(&slice), nil); err != nil { + t.Fatal(err) + } + if got := len(a.R.Entries); got != 2 { + t.Error("number of eager loaded records wrong, got:", got) + } + + a.R.Entries = nil + if err = a.L.LoadEntries(ctx, tx, 
true, &a, nil); err != nil { + t.Fatal(err) + } + if got := len(a.R.Entries); got != 2 { + t.Error("number of eager loaded records wrong, got:", got) + } + + if t.Failed() { + t.Logf("%#v", check) + } +} + +func testBlogToManyAddOpEntries(t *testing.T) { + var err error + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + + var a Blog + var b, c, d, e Entry + + seed := randomize.NewSeed() + if err = randomize.Struct(seed, &a, blogDBTypes, false, strmangle.SetComplement(blogPrimaryKeyColumns, blogColumnsWithoutDefault)...); err != nil { + t.Fatal(err) + } + foreigners := []*Entry{&b, &c, &d, &e} + for _, x := range foreigners { + if err = randomize.Struct(seed, x, entryDBTypes, false, strmangle.SetComplement(entryPrimaryKeyColumns, entryColumnsWithoutDefault)...); err != nil { + t.Fatal(err) + } + } + + if err := a.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + if err = b.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + if err = c.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + foreignersSplitByInsertion := [][]*Entry{ + {&b, &c}, + {&d, &e}, + } + + for i, x := range foreignersSplitByInsertion { + err = a.AddEntries(ctx, tx, i != 0, x...) 
+ if err != nil { + t.Fatal(err) + } + + first := x[0] + second := x[1] + + if a.ID != first.BlogID { + t.Error("foreign key was wrong value", a.ID, first.BlogID) + } + if a.ID != second.BlogID { + t.Error("foreign key was wrong value", a.ID, second.BlogID) + } + + if first.R.Blog != &a { + t.Error("relationship was not added properly to the foreign slice") + } + if second.R.Blog != &a { + t.Error("relationship was not added properly to the foreign slice") + } + + if a.R.Entries[i*2] != first { + t.Error("relationship struct slice not set to correct value") + } + if a.R.Entries[i*2+1] != second { + t.Error("relationship struct slice not set to correct value") + } + + count, err := a.Entries().Count(ctx, tx) + if err != nil { + t.Fatal(err) + } + if want := int64((i + 1) * 2); count != want { + t.Error("want", want, "got", count) + } + } +} +func testBlogToOneUserUsingUser(t *testing.T) { + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + + var local Blog + var foreign User + + seed := randomize.NewSeed() + if err := randomize.Struct(seed, &local, blogDBTypes, false, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + if err := randomize.Struct(seed, &foreign, userDBTypes, false, userColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize User struct: %s", err) + } + + if err := foreign.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + local.UserID = foreign.ID + if err := local.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + check, err := local.User().One(ctx, tx) + if err != nil { + t.Fatal(err) + } + + if check.ID != foreign.ID { + t.Errorf("want: %v, got %v", foreign.ID, check.ID) + } + + slice := BlogSlice{&local} + if err = local.L.LoadUser(ctx, tx, false, (*[]*Blog)(&slice), nil); err != nil { + t.Fatal(err) + } + if local.R.User == nil { + t.Error("struct should have been eager loaded") + } + + 
local.R.User = nil + if err = local.L.LoadUser(ctx, tx, true, &local, nil); err != nil { + t.Fatal(err) + } + if local.R.User == nil { + t.Error("struct should have been eager loaded") + } +} + +func testBlogToOneSetOpUserUsingUser(t *testing.T) { + var err error + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + + var a Blog + var b, c User + + seed := randomize.NewSeed() + if err = randomize.Struct(seed, &a, blogDBTypes, false, strmangle.SetComplement(blogPrimaryKeyColumns, blogColumnsWithoutDefault)...); err != nil { + t.Fatal(err) + } + if err = randomize.Struct(seed, &b, userDBTypes, false, strmangle.SetComplement(userPrimaryKeyColumns, userColumnsWithoutDefault)...); err != nil { + t.Fatal(err) + } + if err = randomize.Struct(seed, &c, userDBTypes, false, strmangle.SetComplement(userPrimaryKeyColumns, userColumnsWithoutDefault)...); err != nil { + t.Fatal(err) + } + + if err := a.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + if err = b.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + for i, x := range []*User{&b, &c} { + err = a.SetUser(ctx, tx, i != 0, x) + if err != nil { + t.Fatal(err) + } + + if a.R.User != x { + t.Error("relationship struct not set to correct value") + } + + if x.R.Blogs[0] != &a { + t.Error("failed to append to foreign relationship struct") + } + if a.UserID != x.ID { + t.Error("foreign key was wrong value", a.UserID) + } + + zero := reflect.Zero(reflect.TypeOf(a.UserID)) + reflect.Indirect(reflect.ValueOf(&a.UserID)).Set(zero) + + if err = a.Reload(ctx, tx); err != nil { + t.Fatal("failed to reload", err) + } + + if a.UserID != x.ID { + t.Error("foreign key was wrong value", a.UserID, x.ID) + } + } +} + +func testBlogsReload(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Blog{} + if err = randomize.Struct(seed, o, blogDBTypes, true, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to 
randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if err = o.Reload(ctx, tx); err != nil { + t.Error(err) + } +} + +func testBlogsReloadAll(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Blog{} + if err = randomize.Struct(seed, o, blogDBTypes, true, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice := BlogSlice{o} + + if err = slice.ReloadAll(ctx, tx); err != nil { + t.Error(err) + } +} + +func testBlogsSelect(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Blog{} + if err = randomize.Struct(seed, o, blogDBTypes, true, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice, err := Blogs().All(ctx, tx) + if err != nil { + t.Error(err) + } + + if len(slice) != 1 { + t.Error("want one record, got:", len(slice)) + } +} + +var ( + blogDBTypes = map[string]string{`ID`: `bigint`, `URL`: `character varying`, `FeedURL`: `character varying`, `UserID`: `bigint`, `CreatedAt`: `timestamp without time zone`, `UpdatedAt`: `timestamp without time zone`} + _ = bytes.MinRead +) + +func testBlogsUpdate(t *testing.T) { + t.Parallel() + + if 0 == len(blogPrimaryKeyColumns) { + t.Skip("Skipping table with no primary key columns") + } + if len(blogColumns) == len(blogPrimaryKeyColumns) { + t.Skip("Skipping table with only primary key columns") + } + + seed := 
randomize.NewSeed() + var err error + o := &Blog{} + if err = randomize.Struct(seed, o, blogDBTypes, true, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := Blogs().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + t.Error("want one record, got:", count) + } + + if err = randomize.Struct(seed, o, blogDBTypes, true, blogPrimaryKeyColumns...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only affect one row but affected", rowsAff) + } +} + +func testBlogsSliceUpdateAll(t *testing.T) { + t.Parallel() + + if len(blogColumns) == len(blogPrimaryKeyColumns) { + t.Skip("Skipping table with only primary key columns") + } + + seed := randomize.NewSeed() + var err error + o := &Blog{} + if err = randomize.Struct(seed, o, blogDBTypes, true, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := Blogs().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + t.Error("want one record, got:", count) + } + + if err = randomize.Struct(seed, o, blogDBTypes, true, blogPrimaryKeyColumns...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + // Remove Primary keys and unique columns from what we plan to update + var fields []string + if strmangle.StringSliceMatch(blogColumns, blogPrimaryKeyColumns) { + fields = blogColumns + } else { + fields = strmangle.SetComplement( + 
blogColumns, + blogPrimaryKeyColumns, + ) + } + + value := reflect.Indirect(reflect.ValueOf(o)) + typ := reflect.TypeOf(o).Elem() + n := typ.NumField() + + updateMap := M{} + for _, col := range fields { + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.Tag.Get("boil") == col { + updateMap[col] = value.Field(i).Interface() + } + } + } + + slice := BlogSlice{o} + if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("wanted one record updated but got", rowsAff) + } +} + +func testBlogsUpsert(t *testing.T) { + t.Parallel() + + if len(blogColumns) == len(blogPrimaryKeyColumns) { + t.Skip("Skipping table with only primary key columns") + } + + seed := randomize.NewSeed() + var err error + // Attempt the INSERT side of an UPSERT + o := Blog{} + if err = randomize.Struct(seed, &o, blogDBTypes, true); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Upsert(ctx, tx, false, nil, boil.Infer(), boil.Infer()); err != nil { + t.Errorf("Unable to upsert Blog: %s", err) + } + + count, err := Blogs().Count(ctx, tx) + if err != nil { + t.Error(err) + } + if count != 1 { + t.Error("want one record, got:", count) + } + + // Attempt the UPDATE side of an UPSERT + if err = randomize.Struct(seed, &o, blogDBTypes, false, blogPrimaryKeyColumns...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + if err = o.Upsert(ctx, tx, true, nil, boil.Infer(), boil.Infer()); err != nil { + t.Errorf("Unable to upsert Blog: %s", err) + } + + count, err = Blogs().Count(ctx, tx) + if err != nil { + t.Error(err) + } + if count != 1 { + t.Error("want one record, got:", count) + } +} diff --git a/infra/record/boil_suites_test.go b/infra/record/boil_suites_test.go index b651e367..050bc793 100644 --- a/infra/record/boil_suites_test.go +++ 
b/infra/record/boil_suites_test.go @@ -12,85 +12,111 @@ import "testing" // It does NOT run each operation group in parallel. // Separating the tests thusly grants avoidance of Postgres deadlocks. func TestParent(t *testing.T) { + t.Run("Blogs", testBlogs) t.Run("Departments", testDepartments) + t.Run("Entries", testEntries) t.Run("Profiles", testProfiles) t.Run("Roles", testRoles) t.Run("Users", testUsers) } func TestDelete(t *testing.T) { + t.Run("Blogs", testBlogsDelete) t.Run("Departments", testDepartmentsDelete) + t.Run("Entries", testEntriesDelete) t.Run("Profiles", testProfilesDelete) t.Run("Roles", testRolesDelete) t.Run("Users", testUsersDelete) } func TestQueryDeleteAll(t *testing.T) { + t.Run("Blogs", testBlogsQueryDeleteAll) t.Run("Departments", testDepartmentsQueryDeleteAll) + t.Run("Entries", testEntriesQueryDeleteAll) t.Run("Profiles", testProfilesQueryDeleteAll) t.Run("Roles", testRolesQueryDeleteAll) t.Run("Users", testUsersQueryDeleteAll) } func TestSliceDeleteAll(t *testing.T) { + t.Run("Blogs", testBlogsSliceDeleteAll) t.Run("Departments", testDepartmentsSliceDeleteAll) + t.Run("Entries", testEntriesSliceDeleteAll) t.Run("Profiles", testProfilesSliceDeleteAll) t.Run("Roles", testRolesSliceDeleteAll) t.Run("Users", testUsersSliceDeleteAll) } func TestExists(t *testing.T) { + t.Run("Blogs", testBlogsExists) t.Run("Departments", testDepartmentsExists) + t.Run("Entries", testEntriesExists) t.Run("Profiles", testProfilesExists) t.Run("Roles", testRolesExists) t.Run("Users", testUsersExists) } func TestFind(t *testing.T) { + t.Run("Blogs", testBlogsFind) t.Run("Departments", testDepartmentsFind) + t.Run("Entries", testEntriesFind) t.Run("Profiles", testProfilesFind) t.Run("Roles", testRolesFind) t.Run("Users", testUsersFind) } func TestBind(t *testing.T) { + t.Run("Blogs", testBlogsBind) t.Run("Departments", testDepartmentsBind) + t.Run("Entries", testEntriesBind) t.Run("Profiles", testProfilesBind) t.Run("Roles", testRolesBind) t.Run("Users", 
testUsersBind) } func TestOne(t *testing.T) { + t.Run("Blogs", testBlogsOne) t.Run("Departments", testDepartmentsOne) + t.Run("Entries", testEntriesOne) t.Run("Profiles", testProfilesOne) t.Run("Roles", testRolesOne) t.Run("Users", testUsersOne) } func TestAll(t *testing.T) { + t.Run("Blogs", testBlogsAll) t.Run("Departments", testDepartmentsAll) + t.Run("Entries", testEntriesAll) t.Run("Profiles", testProfilesAll) t.Run("Roles", testRolesAll) t.Run("Users", testUsersAll) } func TestCount(t *testing.T) { + t.Run("Blogs", testBlogsCount) t.Run("Departments", testDepartmentsCount) + t.Run("Entries", testEntriesCount) t.Run("Profiles", testProfilesCount) t.Run("Roles", testRolesCount) t.Run("Users", testUsersCount) } func TestHooks(t *testing.T) { + t.Run("Blogs", testBlogsHooks) t.Run("Departments", testDepartmentsHooks) + t.Run("Entries", testEntriesHooks) t.Run("Profiles", testProfilesHooks) t.Run("Roles", testRolesHooks) t.Run("Users", testUsersHooks) } func TestInsert(t *testing.T) { + t.Run("Blogs", testBlogsInsert) + t.Run("Blogs", testBlogsInsertWhitelist) t.Run("Departments", testDepartmentsInsert) t.Run("Departments", testDepartmentsInsertWhitelist) + t.Run("Entries", testEntriesInsert) + t.Run("Entries", testEntriesInsertWhitelist) t.Run("Profiles", testProfilesInsert) t.Run("Profiles", testProfilesInsertWhitelist) t.Run("Roles", testRolesInsert) @@ -102,6 +128,9 @@ func TestInsert(t *testing.T) { // TestToOne tests cannot be run in parallel // or deadlocks can occur. 
func TestToOne(t *testing.T) { + t.Run("BlogToUserUsingUser", testBlogToOneUserUsingUser) + t.Run("EntryToBlogUsingBlog", testEntryToOneBlogUsingBlog) + t.Run("EntryToUserUsingAuthor", testEntryToOneUserUsingAuthor) t.Run("ProfileToDepartmentUsingDepartment", testProfileToOneDepartmentUsingDepartment) t.Run("ProfileToRoleUsingRole", testProfileToOneRoleUsingRole) t.Run("UserToProfileUsingProfile", testUserToOneProfileUsingProfile) @@ -114,14 +143,20 @@ func TestOneToOne(t *testing.T) {} // TestToMany tests cannot be run in parallel // or deadlocks can occur. func TestToMany(t *testing.T) { + t.Run("BlogToEntries", testBlogToManyEntries) t.Run("DepartmentToProfiles", testDepartmentToManyProfiles) t.Run("ProfileToUsers", testProfileToManyUsers) t.Run("RoleToProfiles", testRoleToManyProfiles) + t.Run("UserToBlogs", testUserToManyBlogs) + t.Run("UserToAuthorEntries", testUserToManyAuthorEntries) } // TestToOneSet tests cannot be run in parallel // or deadlocks can occur. func TestToOneSet(t *testing.T) { + t.Run("BlogToUserUsingBlogs", testBlogToOneSetOpUserUsingUser) + t.Run("EntryToBlogUsingEntries", testEntryToOneSetOpBlogUsingBlog) + t.Run("EntryToUserUsingAuthorEntries", testEntryToOneSetOpUserUsingAuthor) t.Run("ProfileToDepartmentUsingProfiles", testProfileToOneSetOpDepartmentUsingDepartment) t.Run("ProfileToRoleUsingProfiles", testProfileToOneSetOpRoleUsingRole) t.Run("UserToProfileUsingUsers", testUserToOneSetOpProfileUsingProfile) @@ -146,9 +181,12 @@ func TestOneToOneRemove(t *testing.T) {} // TestToManyAdd tests cannot be run in parallel // or deadlocks can occur. 
func TestToManyAdd(t *testing.T) { + t.Run("BlogToEntries", testBlogToManyAddOpEntries) t.Run("DepartmentToProfiles", testDepartmentToManyAddOpProfiles) t.Run("ProfileToUsers", testProfileToManyAddOpUsers) t.Run("RoleToProfiles", testRoleToManyAddOpProfiles) + t.Run("UserToBlogs", testUserToManyAddOpBlogs) + t.Run("UserToAuthorEntries", testUserToManyAddOpAuthorEntries) } // TestToManySet tests cannot be run in parallel @@ -168,35 +206,45 @@ func TestToManyRemove(t *testing.T) { } func TestReload(t *testing.T) { + t.Run("Blogs", testBlogsReload) t.Run("Departments", testDepartmentsReload) + t.Run("Entries", testEntriesReload) t.Run("Profiles", testProfilesReload) t.Run("Roles", testRolesReload) t.Run("Users", testUsersReload) } func TestReloadAll(t *testing.T) { + t.Run("Blogs", testBlogsReloadAll) t.Run("Departments", testDepartmentsReloadAll) + t.Run("Entries", testEntriesReloadAll) t.Run("Profiles", testProfilesReloadAll) t.Run("Roles", testRolesReloadAll) t.Run("Users", testUsersReloadAll) } func TestSelect(t *testing.T) { + t.Run("Blogs", testBlogsSelect) t.Run("Departments", testDepartmentsSelect) + t.Run("Entries", testEntriesSelect) t.Run("Profiles", testProfilesSelect) t.Run("Roles", testRolesSelect) t.Run("Users", testUsersSelect) } func TestUpdate(t *testing.T) { + t.Run("Blogs", testBlogsUpdate) t.Run("Departments", testDepartmentsUpdate) + t.Run("Entries", testEntriesUpdate) t.Run("Profiles", testProfilesUpdate) t.Run("Roles", testRolesUpdate) t.Run("Users", testUsersUpdate) } func TestSliceUpdateAll(t *testing.T) { + t.Run("Blogs", testBlogsSliceUpdateAll) t.Run("Departments", testDepartmentsSliceUpdateAll) + t.Run("Entries", testEntriesSliceUpdateAll) t.Run("Profiles", testProfilesSliceUpdateAll) t.Run("Roles", testRolesSliceUpdateAll) t.Run("Users", testUsersSliceUpdateAll) diff --git a/infra/record/boil_table_names.go b/infra/record/boil_table_names.go index e27247ec..1dcba2b2 100644 --- a/infra/record/boil_table_names.go +++ 
b/infra/record/boil_table_names.go @@ -4,12 +4,16 @@ package record var TableNames = struct { + Blogs string Departments string + Entries string Profiles string Roles string Users string }{ + Blogs: "blogs", Departments: "departments", + Entries: "entries", Profiles: "profiles", Roles: "roles", Users: "users", diff --git a/infra/record/departments.go b/infra/record/departments.go index bf034fa8..2a49a289 100644 --- a/infra/record/departments.go +++ b/infra/record/departments.go @@ -43,24 +43,6 @@ var DepartmentColumns = struct { // Generated where -type whereHelperint64 struct{ field string } - -func (w whereHelperint64) EQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) } -func (w whereHelperint64) NEQ(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) } -func (w whereHelperint64) LT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) } -func (w whereHelperint64) LTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) } -func (w whereHelperint64) GT(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) } -func (w whereHelperint64) GTE(x int64) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) } - -type whereHelperstring struct{ field string } - -func (w whereHelperstring) EQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.EQ, x) } -func (w whereHelperstring) NEQ(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.NEQ, x) } -func (w whereHelperstring) LT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LT, x) } -func (w whereHelperstring) LTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.LTE, x) } -func (w whereHelperstring) GT(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GT, x) } -func (w whereHelperstring) GTE(x string) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) } - var DepartmentWhere = struct { ID whereHelperint64 Name whereHelperstring diff --git 
a/infra/record/entries.go b/infra/record/entries.go new file mode 100644 index 00000000..bf3ea470 --- /dev/null +++ b/infra/record/entries.go @@ -0,0 +1,1300 @@ +// Code generated by SQLBoiler (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package record + +import ( + "context" + "database/sql" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/volatiletech/null" + "github.com/volatiletech/sqlboiler/boil" + "github.com/volatiletech/sqlboiler/queries" + "github.com/volatiletech/sqlboiler/queries/qm" + "github.com/volatiletech/sqlboiler/queries/qmhelper" + "github.com/volatiletech/sqlboiler/strmangle" +) + +// Entry is an object representing the database table. +type Entry struct { + ID int64 `boil:"id" json:"id" toml:"id" yaml:"id"` + Title string `boil:"title" json:"title" toml:"title" yaml:"title"` + Description string `boil:"description" json:"description" toml:"description" yaml:"description"` + Content string `boil:"content" json:"content" toml:"content" yaml:"content"` + Link string `boil:"link" json:"link" toml:"link" yaml:"link"` + AuthorID int64 `boil:"author_id" json:"author_id" toml:"author_id" yaml:"author_id"` + GUID string `boil:"guid" json:"guid" toml:"guid" yaml:"guid"` + BlogID int64 `boil:"blog_id" json:"blog_id" toml:"blog_id" yaml:"blog_id"` + CreatedAt time.Time `boil:"created_at" json:"created_at" toml:"created_at" yaml:"created_at"` + UpdatedAt time.Time `boil:"updated_at" json:"updated_at" toml:"updated_at" yaml:"updated_at"` + ImageURL string `boil:"image_url" json:"image_url" toml:"image_url" yaml:"image_url"` + PublishedAt null.Time `boil:"published_at" json:"published_at,omitempty" toml:"published_at" yaml:"published_at,omitempty"` + + R *entryR `boil:"-" json:"-" toml:"-" yaml:"-"` + L entryL `boil:"-" json:"-" 
toml:"-" yaml:"-"` +} + +var EntryColumns = struct { + ID string + Title string + Description string + Content string + Link string + AuthorID string + GUID string + BlogID string + CreatedAt string + UpdatedAt string + ImageURL string + PublishedAt string +}{ + ID: "id", + Title: "title", + Description: "description", + Content: "content", + Link: "link", + AuthorID: "author_id", + GUID: "guid", + BlogID: "blog_id", + CreatedAt: "created_at", + UpdatedAt: "updated_at", + ImageURL: "image_url", + PublishedAt: "published_at", +} + +// Generated where + +type whereHelpernull_Time struct{ field string } + +func (w whereHelpernull_Time) EQ(x null.Time) qm.QueryMod { + return qmhelper.WhereNullEQ(w.field, false, x) +} +func (w whereHelpernull_Time) NEQ(x null.Time) qm.QueryMod { + return qmhelper.WhereNullEQ(w.field, true, x) +} +func (w whereHelpernull_Time) IsNull() qm.QueryMod { return qmhelper.WhereIsNull(w.field) } +func (w whereHelpernull_Time) IsNotNull() qm.QueryMod { return qmhelper.WhereIsNotNull(w.field) } +func (w whereHelpernull_Time) LT(x null.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.LT, x) +} +func (w whereHelpernull_Time) LTE(x null.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.LTE, x) +} +func (w whereHelpernull_Time) GT(x null.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.GT, x) +} +func (w whereHelpernull_Time) GTE(x null.Time) qm.QueryMod { + return qmhelper.Where(w.field, qmhelper.GTE, x) +} + +var EntryWhere = struct { + ID whereHelperint64 + Title whereHelperstring + Description whereHelperstring + Content whereHelperstring + Link whereHelperstring + AuthorID whereHelperint64 + GUID whereHelperstring + BlogID whereHelperint64 + CreatedAt whereHelpertime_Time + UpdatedAt whereHelpertime_Time + ImageURL whereHelperstring + PublishedAt whereHelpernull_Time +}{ + ID: whereHelperint64{field: `id`}, + Title: whereHelperstring{field: `title`}, + Description: whereHelperstring{field: `description`}, + 
Content: whereHelperstring{field: `content`}, + Link: whereHelperstring{field: `link`}, + AuthorID: whereHelperint64{field: `author_id`}, + GUID: whereHelperstring{field: `guid`}, + BlogID: whereHelperint64{field: `blog_id`}, + CreatedAt: whereHelpertime_Time{field: `created_at`}, + UpdatedAt: whereHelpertime_Time{field: `updated_at`}, + ImageURL: whereHelperstring{field: `image_url`}, + PublishedAt: whereHelpernull_Time{field: `published_at`}, +} + +// EntryRels is where relationship names are stored. +var EntryRels = struct { + Blog string + Author string +}{ + Blog: "Blog", + Author: "Author", +} + +// entryR is where relationships are stored. +type entryR struct { + Blog *Blog + Author *User +} + +// NewStruct creates a new relationship struct +func (*entryR) NewStruct() *entryR { + return &entryR{} +} + +// entryL is where Load methods for each relationship are stored. +type entryL struct{} + +var ( + entryColumns = []string{"id", "title", "description", "content", "link", "author_id", "guid", "blog_id", "created_at", "updated_at", "image_url", "published_at"} + entryColumnsWithoutDefault = []string{"title", "description", "content", "link", "author_id", "guid", "blog_id", "created_at", "updated_at", "image_url", "published_at"} + entryColumnsWithDefault = []string{"id"} + entryPrimaryKeyColumns = []string{"id"} +) + +type ( + // EntrySlice is an alias for a slice of pointers to Entry. + // This should generally be used opposed to []Entry. 
+ EntrySlice []*Entry + // EntryHook is the signature for custom Entry hook methods + EntryHook func(context.Context, boil.ContextExecutor, *Entry) error + + entryQuery struct { + *queries.Query + } +) + +// Cache for insert, update and upsert +var ( + entryType = reflect.TypeOf(&Entry{}) + entryMapping = queries.MakeStructMapping(entryType) + entryPrimaryKeyMapping, _ = queries.BindMapping(entryType, entryMapping, entryPrimaryKeyColumns) + entryInsertCacheMut sync.RWMutex + entryInsertCache = make(map[string]insertCache) + entryUpdateCacheMut sync.RWMutex + entryUpdateCache = make(map[string]updateCache) + entryUpsertCacheMut sync.RWMutex + entryUpsertCache = make(map[string]insertCache) +) + +var ( + // Force time package dependency for automated UpdatedAt/CreatedAt. + _ = time.Second + // Force qmhelper dependency for where clause generation (which doesn't + // always happen) + _ = qmhelper.Where +) + +var entryBeforeInsertHooks []EntryHook +var entryBeforeUpdateHooks []EntryHook +var entryBeforeDeleteHooks []EntryHook +var entryBeforeUpsertHooks []EntryHook + +var entryAfterInsertHooks []EntryHook +var entryAfterSelectHooks []EntryHook +var entryAfterUpdateHooks []EntryHook +var entryAfterDeleteHooks []EntryHook +var entryAfterUpsertHooks []EntryHook + +// doBeforeInsertHooks executes all "before insert" hooks. +func (o *Entry) doBeforeInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range entryBeforeInsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doBeforeUpdateHooks executes all "before Update" hooks. 
+func (o *Entry) doBeforeUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range entryBeforeUpdateHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doBeforeDeleteHooks executes all "before Delete" hooks. +func (o *Entry) doBeforeDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range entryBeforeDeleteHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doBeforeUpsertHooks executes all "before Upsert" hooks. +func (o *Entry) doBeforeUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range entryBeforeUpsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterInsertHooks executes all "after Insert" hooks. +func (o *Entry) doAfterInsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range entryAfterInsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterSelectHooks executes all "after Select" hooks. +func (o *Entry) doAfterSelectHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range entryAfterSelectHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterUpdateHooks executes all "after Update" hooks. 
+func (o *Entry) doAfterUpdateHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range entryAfterUpdateHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterDeleteHooks executes all "after Delete" hooks. +func (o *Entry) doAfterDeleteHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range entryAfterDeleteHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// doAfterUpsertHooks executes all "after Upsert" hooks. +func (o *Entry) doAfterUpsertHooks(ctx context.Context, exec boil.ContextExecutor) (err error) { + if boil.HooksAreSkipped(ctx) { + return nil + } + + for _, hook := range entryAfterUpsertHooks { + if err := hook(ctx, exec, o); err != nil { + return err + } + } + + return nil +} + +// AddEntryHook registers your hook function for all future operations. 
+func AddEntryHook(hookPoint boil.HookPoint, entryHook EntryHook) { + switch hookPoint { + case boil.BeforeInsertHook: + entryBeforeInsertHooks = append(entryBeforeInsertHooks, entryHook) + case boil.BeforeUpdateHook: + entryBeforeUpdateHooks = append(entryBeforeUpdateHooks, entryHook) + case boil.BeforeDeleteHook: + entryBeforeDeleteHooks = append(entryBeforeDeleteHooks, entryHook) + case boil.BeforeUpsertHook: + entryBeforeUpsertHooks = append(entryBeforeUpsertHooks, entryHook) + case boil.AfterInsertHook: + entryAfterInsertHooks = append(entryAfterInsertHooks, entryHook) + case boil.AfterSelectHook: + entryAfterSelectHooks = append(entryAfterSelectHooks, entryHook) + case boil.AfterUpdateHook: + entryAfterUpdateHooks = append(entryAfterUpdateHooks, entryHook) + case boil.AfterDeleteHook: + entryAfterDeleteHooks = append(entryAfterDeleteHooks, entryHook) + case boil.AfterUpsertHook: + entryAfterUpsertHooks = append(entryAfterUpsertHooks, entryHook) + } +} + +// One returns a single entry record from the query. +func (q entryQuery) One(ctx context.Context, exec boil.ContextExecutor) (*Entry, error) { + o := &Entry{} + + queries.SetLimit(q.Query, 1) + + err := q.Bind(ctx, exec, o) + if err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, sql.ErrNoRows + } + return nil, errors.Wrap(err, "record: failed to execute a one query for entries") + } + + if err := o.doAfterSelectHooks(ctx, exec); err != nil { + return o, err + } + + return o, nil +} + +// All returns all Entry records from the query. 
+func (q entryQuery) All(ctx context.Context, exec boil.ContextExecutor) (EntrySlice, error) { + var o []*Entry + + err := q.Bind(ctx, exec, &o) + if err != nil { + return nil, errors.Wrap(err, "record: failed to assign all query results to Entry slice") + } + + if len(entryAfterSelectHooks) != 0 { + for _, obj := range o { + if err := obj.doAfterSelectHooks(ctx, exec); err != nil { + return o, err + } + } + } + + return o, nil +} + +// Count returns the count of all Entry records in the query. +func (q entryQuery) Count(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + var count int64 + + queries.SetSelect(q.Query, nil) + queries.SetCount(q.Query) + + err := q.Query.QueryRowContext(ctx, exec).Scan(&count) + if err != nil { + return 0, errors.Wrap(err, "record: failed to count entries rows") + } + + return count, nil +} + +// Exists checks if the row exists in the table. +func (q entryQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) { + var count int64 + + queries.SetSelect(q.Query, nil) + queries.SetCount(q.Query) + queries.SetLimit(q.Query, 1) + + err := q.Query.QueryRowContext(ctx, exec).Scan(&count) + if err != nil { + return false, errors.Wrap(err, "record: failed to check if entries exists") + } + + return count > 0, nil +} + +// Blog pointed to by the foreign key. +func (o *Entry) Blog(mods ...qm.QueryMod) blogQuery { + queryMods := []qm.QueryMod{ + qm.Where("id=?", o.BlogID), + } + + queryMods = append(queryMods, mods...) + + query := Blogs(queryMods...) + queries.SetFrom(query.Query, "\"blogs\"") + + return query +} + +// Author pointed to by the foreign key. +func (o *Entry) Author(mods ...qm.QueryMod) userQuery { + queryMods := []qm.QueryMod{ + qm.Where("id=?", o.AuthorID), + } + + queryMods = append(queryMods, mods...) + + query := Users(queryMods...) 
+ queries.SetFrom(query.Query, "\"users\"") + + return query +} + +// LoadBlog allows an eager lookup of values, cached into the +// loaded structs of the objects. This is for an N-1 relationship. +func (entryL) LoadBlog(ctx context.Context, e boil.ContextExecutor, singular bool, maybeEntry interface{}, mods queries.Applicator) error { + var slice []*Entry + var object *Entry + + if singular { + object = maybeEntry.(*Entry) + } else { + slice = *maybeEntry.(*[]*Entry) + } + + args := make([]interface{}, 0, 1) + if singular { + if object.R == nil { + object.R = &entryR{} + } + args = append(args, object.BlogID) + + } else { + Outer: + for _, obj := range slice { + if obj.R == nil { + obj.R = &entryR{} + } + + for _, a := range args { + if a == obj.BlogID { + continue Outer + } + } + + args = append(args, obj.BlogID) + + } + } + + if len(args) == 0 { + return nil + } + + query := NewQuery(qm.From(`blogs`), qm.WhereIn(`id in ?`, args...)) + if mods != nil { + mods.Apply(query) + } + + results, err := query.QueryContext(ctx, e) + if err != nil { + return errors.Wrap(err, "failed to eager load Blog") + } + + var resultSlice []*Blog + if err = queries.Bind(results, &resultSlice); err != nil { + return errors.Wrap(err, "failed to bind eager loaded slice Blog") + } + + if err = results.Close(); err != nil { + return errors.Wrap(err, "failed to close results of eager load for blogs") + } + if err = results.Err(); err != nil { + return errors.Wrap(err, "error occurred during iteration of eager loaded relations for blogs") + } + + if len(entryAfterSelectHooks) != 0 { + for _, obj := range resultSlice { + if err := obj.doAfterSelectHooks(ctx, e); err != nil { + return err + } + } + } + + if len(resultSlice) == 0 { + return nil + } + + if singular { + foreign := resultSlice[0] + object.R.Blog = foreign + if foreign.R == nil { + foreign.R = &blogR{} + } + foreign.R.Entries = append(foreign.R.Entries, object) + return nil + } + + for _, local := range slice { + for _, foreign := 
range resultSlice { + if local.BlogID == foreign.ID { + local.R.Blog = foreign + if foreign.R == nil { + foreign.R = &blogR{} + } + foreign.R.Entries = append(foreign.R.Entries, local) + break + } + } + } + + return nil +} + +// LoadAuthor allows an eager lookup of values, cached into the +// loaded structs of the objects. This is for an N-1 relationship. +func (entryL) LoadAuthor(ctx context.Context, e boil.ContextExecutor, singular bool, maybeEntry interface{}, mods queries.Applicator) error { + var slice []*Entry + var object *Entry + + if singular { + object = maybeEntry.(*Entry) + } else { + slice = *maybeEntry.(*[]*Entry) + } + + args := make([]interface{}, 0, 1) + if singular { + if object.R == nil { + object.R = &entryR{} + } + args = append(args, object.AuthorID) + + } else { + Outer: + for _, obj := range slice { + if obj.R == nil { + obj.R = &entryR{} + } + + for _, a := range args { + if a == obj.AuthorID { + continue Outer + } + } + + args = append(args, obj.AuthorID) + + } + } + + if len(args) == 0 { + return nil + } + + query := NewQuery(qm.From(`users`), qm.WhereIn(`id in ?`, args...)) + if mods != nil { + mods.Apply(query) + } + + results, err := query.QueryContext(ctx, e) + if err != nil { + return errors.Wrap(err, "failed to eager load User") + } + + var resultSlice []*User + if err = queries.Bind(results, &resultSlice); err != nil { + return errors.Wrap(err, "failed to bind eager loaded slice User") + } + + if err = results.Close(); err != nil { + return errors.Wrap(err, "failed to close results of eager load for users") + } + if err = results.Err(); err != nil { + return errors.Wrap(err, "error occurred during iteration of eager loaded relations for users") + } + + if len(entryAfterSelectHooks) != 0 { + for _, obj := range resultSlice { + if err := obj.doAfterSelectHooks(ctx, e); err != nil { + return err + } + } + } + + if len(resultSlice) == 0 { + return nil + } + + if singular { + foreign := resultSlice[0] + object.R.Author = foreign + if 
foreign.R == nil { + foreign.R = &userR{} + } + foreign.R.AuthorEntries = append(foreign.R.AuthorEntries, object) + return nil + } + + for _, local := range slice { + for _, foreign := range resultSlice { + if local.AuthorID == foreign.ID { + local.R.Author = foreign + if foreign.R == nil { + foreign.R = &userR{} + } + foreign.R.AuthorEntries = append(foreign.R.AuthorEntries, local) + break + } + } + } + + return nil +} + +// SetBlog of the entry to the related item. +// Sets o.R.Blog to related. +// Adds o to related.R.Entries. +func (o *Entry) SetBlog(ctx context.Context, exec boil.ContextExecutor, insert bool, related *Blog) error { + var err error + if insert { + if err = related.Insert(ctx, exec, boil.Infer()); err != nil { + return errors.Wrap(err, "failed to insert into foreign table") + } + } + + updateQuery := fmt.Sprintf( + "UPDATE \"entries\" SET %s WHERE %s", + strmangle.SetParamNames("\"", "\"", 1, []string{"blog_id"}), + strmangle.WhereClause("\"", "\"", 2, entryPrimaryKeyColumns), + ) + values := []interface{}{related.ID, o.ID} + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, updateQuery) + fmt.Fprintln(boil.DebugWriter, values) + } + + if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil { + return errors.Wrap(err, "failed to update local table") + } + + o.BlogID = related.ID + if o.R == nil { + o.R = &entryR{ + Blog: related, + } + } else { + o.R.Blog = related + } + + if related.R == nil { + related.R = &blogR{ + Entries: EntrySlice{o}, + } + } else { + related.R.Entries = append(related.R.Entries, o) + } + + return nil +} + +// SetAuthor of the entry to the related item. +// Sets o.R.Author to related. +// Adds o to related.R.AuthorEntries. 
+func (o *Entry) SetAuthor(ctx context.Context, exec boil.ContextExecutor, insert bool, related *User) error { + var err error + if insert { + if err = related.Insert(ctx, exec, boil.Infer()); err != nil { + return errors.Wrap(err, "failed to insert into foreign table") + } + } + + updateQuery := fmt.Sprintf( + "UPDATE \"entries\" SET %s WHERE %s", + strmangle.SetParamNames("\"", "\"", 1, []string{"author_id"}), + strmangle.WhereClause("\"", "\"", 2, entryPrimaryKeyColumns), + ) + values := []interface{}{related.ID, o.ID} + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, updateQuery) + fmt.Fprintln(boil.DebugWriter, values) + } + + if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil { + return errors.Wrap(err, "failed to update local table") + } + + o.AuthorID = related.ID + if o.R == nil { + o.R = &entryR{ + Author: related, + } + } else { + o.R.Author = related + } + + if related.R == nil { + related.R = &userR{ + AuthorEntries: EntrySlice{o}, + } + } else { + related.R.AuthorEntries = append(related.R.AuthorEntries, o) + } + + return nil +} + +// Entries retrieves all the records using an executor. +func Entries(mods ...qm.QueryMod) entryQuery { + mods = append(mods, qm.From("\"entries\"")) + return entryQuery{NewQuery(mods...)} +} + +// FindEntry retrieves a single record by ID with an executor. +// If selectCols is empty Find will return all columns. 
+func FindEntry(ctx context.Context, exec boil.ContextExecutor, iD int64, selectCols ...string) (*Entry, error) { + entryObj := &Entry{} + + sel := "*" + if len(selectCols) > 0 { + sel = strings.Join(strmangle.IdentQuoteSlice(dialect.LQ, dialect.RQ, selectCols), ",") + } + query := fmt.Sprintf( + "select %s from \"entries\" where \"id\"=$1", sel, + ) + + q := queries.Raw(query, iD) + + err := q.Bind(ctx, exec, entryObj) + if err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return nil, sql.ErrNoRows + } + return nil, errors.Wrap(err, "record: unable to select from entries") + } + + return entryObj, nil +} + +// Insert a single record using an executor. +// See boil.Columns.InsertColumnSet documentation to understand column list inference for inserts. +func (o *Entry) Insert(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) error { + if o == nil { + return errors.New("record: no entries provided for insertion") + } + + var err error + if !boil.TimestampsAreSkipped(ctx) { + currTime := time.Now().In(boil.GetLocation()) + + if o.CreatedAt.IsZero() { + o.CreatedAt = currTime + } + if o.UpdatedAt.IsZero() { + o.UpdatedAt = currTime + } + } + + if err := o.doBeforeInsertHooks(ctx, exec); err != nil { + return err + } + + nzDefaults := queries.NonZeroDefaultSet(entryColumnsWithDefault, o) + + key := makeCacheKey(columns, nzDefaults) + entryInsertCacheMut.RLock() + cache, cached := entryInsertCache[key] + entryInsertCacheMut.RUnlock() + + if !cached { + wl, returnColumns := columns.InsertColumnSet( + entryColumns, + entryColumnsWithDefault, + entryColumnsWithoutDefault, + nzDefaults, + ) + + cache.valueMapping, err = queries.BindMapping(entryType, entryMapping, wl) + if err != nil { + return err + } + cache.retMapping, err = queries.BindMapping(entryType, entryMapping, returnColumns) + if err != nil { + return err + } + if len(wl) != 0 { + cache.query = fmt.Sprintf("INSERT INTO \"entries\" (\"%s\") %%sVALUES (%s)%%s", strings.Join(wl, "\",\""), 
strmangle.Placeholders(dialect.UseIndexPlaceholders, len(wl), 1, 1)) + } else { + cache.query = "INSERT INTO \"entries\" %sDEFAULT VALUES%s" + } + + var queryOutput, queryReturning string + + if len(cache.retMapping) != 0 { + queryReturning = fmt.Sprintf(" RETURNING \"%s\"", strings.Join(returnColumns, "\",\"")) + } + + cache.query = fmt.Sprintf(cache.query, queryOutput, queryReturning) + } + + value := reflect.Indirect(reflect.ValueOf(o)) + vals := queries.ValuesFromMapping(value, cache.valueMapping) + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, cache.query) + fmt.Fprintln(boil.DebugWriter, vals) + } + + if len(cache.retMapping) != 0 { + err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(queries.PtrsFromMapping(value, cache.retMapping)...) + } else { + _, err = exec.ExecContext(ctx, cache.query, vals...) + } + + if err != nil { + return errors.Wrap(err, "record: unable to insert into entries") + } + + if !cached { + entryInsertCacheMut.Lock() + entryInsertCache[key] = cache + entryInsertCacheMut.Unlock() + } + + return o.doAfterInsertHooks(ctx, exec) +} + +// Update uses an executor to update the Entry. +// See boil.Columns.UpdateColumnSet documentation to understand column list inference for updates. +// Update does not automatically update the record in case of default values. Use .Reload() to refresh the records. 
+func (o *Entry) Update(ctx context.Context, exec boil.ContextExecutor, columns boil.Columns) (int64, error) { + if !boil.TimestampsAreSkipped(ctx) { + currTime := time.Now().In(boil.GetLocation()) + + o.UpdatedAt = currTime + } + + var err error + if err = o.doBeforeUpdateHooks(ctx, exec); err != nil { + return 0, err + } + key := makeCacheKey(columns, nil) + entryUpdateCacheMut.RLock() + cache, cached := entryUpdateCache[key] + entryUpdateCacheMut.RUnlock() + + if !cached { + wl := columns.UpdateColumnSet( + entryColumns, + entryPrimaryKeyColumns, + ) + + if !columns.IsWhitelist() { + wl = strmangle.SetComplement(wl, []string{"created_at"}) + } + if len(wl) == 0 { + return 0, errors.New("record: unable to update entries, could not build whitelist") + } + + cache.query = fmt.Sprintf("UPDATE \"entries\" SET %s WHERE %s", + strmangle.SetParamNames("\"", "\"", 1, wl), + strmangle.WhereClause("\"", "\"", len(wl)+1, entryPrimaryKeyColumns), + ) + cache.valueMapping, err = queries.BindMapping(entryType, entryMapping, append(wl, entryPrimaryKeyColumns...)) + if err != nil { + return 0, err + } + } + + values := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), cache.valueMapping) + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, cache.query) + fmt.Fprintln(boil.DebugWriter, values) + } + + var result sql.Result + result, err = exec.ExecContext(ctx, cache.query, values...) + if err != nil { + return 0, errors.Wrap(err, "record: unable to update entries row") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "record: failed to get rows affected by update for entries") + } + + if !cached { + entryUpdateCacheMut.Lock() + entryUpdateCache[key] = cache + entryUpdateCacheMut.Unlock() + } + + return rowsAff, o.doAfterUpdateHooks(ctx, exec) +} + +// UpdateAll updates all rows with the specified column values. 
+func (q entryQuery) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { + queries.SetUpdate(q.Query, cols) + + result, err := q.Query.ExecContext(ctx, exec) + if err != nil { + return 0, errors.Wrap(err, "record: unable to update all for entries") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "record: unable to retrieve rows affected for entries") + } + + return rowsAff, nil +} + +// UpdateAll updates all rows with the specified column values, using an executor. +func (o EntrySlice) UpdateAll(ctx context.Context, exec boil.ContextExecutor, cols M) (int64, error) { + ln := int64(len(o)) + if ln == 0 { + return 0, nil + } + + if len(cols) == 0 { + return 0, errors.New("record: update all requires at least one column argument") + } + + colNames := make([]string, len(cols)) + args := make([]interface{}, len(cols)) + + i := 0 + for name, value := range cols { + colNames[i] = name + args[i] = value + i++ + } + + // Append all of the primary key values for each column + for _, obj := range o { + pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), entryPrimaryKeyMapping) + args = append(args, pkeyArgs...) + } + + sql := fmt.Sprintf("UPDATE \"entries\" SET %s WHERE %s", + strmangle.SetParamNames("\"", "\"", 1, colNames), + strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), len(colNames)+1, entryPrimaryKeyColumns, len(o))) + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, sql) + fmt.Fprintln(boil.DebugWriter, args...) + } + + result, err := exec.ExecContext(ctx, sql, args...) + if err != nil { + return 0, errors.Wrap(err, "record: unable to update all in entry slice") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "record: unable to retrieve rows affected all in update all entry") + } + return rowsAff, nil +} + +// Upsert attempts an insert using an executor, and does an update or ignore on conflict. 
+// See boil.Columns documentation for how to properly use updateColumns and insertColumns. +func (o *Entry) Upsert(ctx context.Context, exec boil.ContextExecutor, updateOnConflict bool, conflictColumns []string, updateColumns, insertColumns boil.Columns) error { + if o == nil { + return errors.New("record: no entries provided for upsert") + } + if !boil.TimestampsAreSkipped(ctx) { + currTime := time.Now().In(boil.GetLocation()) + + if o.CreatedAt.IsZero() { + o.CreatedAt = currTime + } + o.UpdatedAt = currTime + } + + if err := o.doBeforeUpsertHooks(ctx, exec); err != nil { + return err + } + + nzDefaults := queries.NonZeroDefaultSet(entryColumnsWithDefault, o) + + // Build cache key in-line uglily - mysql vs psql problems + buf := strmangle.GetBuffer() + if updateOnConflict { + buf.WriteByte('t') + } else { + buf.WriteByte('f') + } + buf.WriteByte('.') + for _, c := range conflictColumns { + buf.WriteString(c) + } + buf.WriteByte('.') + buf.WriteString(strconv.Itoa(updateColumns.Kind)) + for _, c := range updateColumns.Cols { + buf.WriteString(c) + } + buf.WriteByte('.') + buf.WriteString(strconv.Itoa(insertColumns.Kind)) + for _, c := range insertColumns.Cols { + buf.WriteString(c) + } + buf.WriteByte('.') + for _, c := range nzDefaults { + buf.WriteString(c) + } + key := buf.String() + strmangle.PutBuffer(buf) + + entryUpsertCacheMut.RLock() + cache, cached := entryUpsertCache[key] + entryUpsertCacheMut.RUnlock() + + var err error + + if !cached { + insert, ret := insertColumns.InsertColumnSet( + entryColumns, + entryColumnsWithDefault, + entryColumnsWithoutDefault, + nzDefaults, + ) + update := updateColumns.UpdateColumnSet( + entryColumns, + entryPrimaryKeyColumns, + ) + + if updateOnConflict && len(update) == 0 { + return errors.New("record: unable to upsert entries, could not build update column list") + } + + conflict := conflictColumns + if len(conflict) == 0 { + conflict = make([]string, len(entryPrimaryKeyColumns)) + copy(conflict, 
entryPrimaryKeyColumns) + } + cache.query = buildUpsertQueryPostgres(dialect, "\"entries\"", updateOnConflict, ret, update, conflict, insert) + + cache.valueMapping, err = queries.BindMapping(entryType, entryMapping, insert) + if err != nil { + return err + } + if len(ret) != 0 { + cache.retMapping, err = queries.BindMapping(entryType, entryMapping, ret) + if err != nil { + return err + } + } + } + + value := reflect.Indirect(reflect.ValueOf(o)) + vals := queries.ValuesFromMapping(value, cache.valueMapping) + var returns []interface{} + if len(cache.retMapping) != 0 { + returns = queries.PtrsFromMapping(value, cache.retMapping) + } + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, cache.query) + fmt.Fprintln(boil.DebugWriter, vals) + } + + if len(cache.retMapping) != 0 { + err = exec.QueryRowContext(ctx, cache.query, vals...).Scan(returns...) + if err == sql.ErrNoRows { + err = nil // Postgres doesn't return anything when there's no update + } + } else { + _, err = exec.ExecContext(ctx, cache.query, vals...) + } + if err != nil { + return errors.Wrap(err, "record: unable to upsert entries") + } + + if !cached { + entryUpsertCacheMut.Lock() + entryUpsertCache[key] = cache + entryUpsertCacheMut.Unlock() + } + + return o.doAfterUpsertHooks(ctx, exec) +} + +// Delete deletes a single Entry record with an executor. +// Delete will match against the primary key column to find the record to delete. +func (o *Entry) Delete(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + if o == nil { + return 0, errors.New("record: no Entry provided for delete") + } + + if err := o.doBeforeDeleteHooks(ctx, exec); err != nil { + return 0, err + } + + args := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(o)), entryPrimaryKeyMapping) + sql := "DELETE FROM \"entries\" WHERE \"id\"=$1" + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, sql) + fmt.Fprintln(boil.DebugWriter, args...) + } + + result, err := exec.ExecContext(ctx, sql, args...) 
+ if err != nil { + return 0, errors.Wrap(err, "record: unable to delete from entries") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "record: failed to get rows affected by delete for entries") + } + + if err := o.doAfterDeleteHooks(ctx, exec); err != nil { + return 0, err + } + + return rowsAff, nil +} + +// DeleteAll deletes all matching rows. +func (q entryQuery) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + if q.Query == nil { + return 0, errors.New("record: no entryQuery provided for delete all") + } + + queries.SetDelete(q.Query) + + result, err := q.Query.ExecContext(ctx, exec) + if err != nil { + return 0, errors.Wrap(err, "record: unable to delete all from entries") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "record: failed to get rows affected by deleteall for entries") + } + + return rowsAff, nil +} + +// DeleteAll deletes all rows in the slice, using an executor. +func (o EntrySlice) DeleteAll(ctx context.Context, exec boil.ContextExecutor) (int64, error) { + if o == nil { + return 0, errors.New("record: no Entry slice provided for delete all") + } + + if len(o) == 0 { + return 0, nil + } + + if len(entryBeforeDeleteHooks) != 0 { + for _, obj := range o { + if err := obj.doBeforeDeleteHooks(ctx, exec); err != nil { + return 0, err + } + } + } + + var args []interface{} + for _, obj := range o { + pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), entryPrimaryKeyMapping) + args = append(args, pkeyArgs...) + } + + sql := "DELETE FROM \"entries\" WHERE " + + strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, entryPrimaryKeyColumns, len(o)) + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, sql) + fmt.Fprintln(boil.DebugWriter, args) + } + + result, err := exec.ExecContext(ctx, sql, args...) 
+ if err != nil { + return 0, errors.Wrap(err, "record: unable to delete all from entry slice") + } + + rowsAff, err := result.RowsAffected() + if err != nil { + return 0, errors.Wrap(err, "record: failed to get rows affected by deleteall for entries") + } + + if len(entryAfterDeleteHooks) != 0 { + for _, obj := range o { + if err := obj.doAfterDeleteHooks(ctx, exec); err != nil { + return 0, err + } + } + } + + return rowsAff, nil +} + +// Reload refetches the object from the database +// using the primary keys with an executor. +func (o *Entry) Reload(ctx context.Context, exec boil.ContextExecutor) error { + ret, err := FindEntry(ctx, exec, o.ID) + if err != nil { + return err + } + + *o = *ret + return nil +} + +// ReloadAll refetches every row with matching primary key column values +// and overwrites the original object slice with the newly updated slice. +func (o *EntrySlice) ReloadAll(ctx context.Context, exec boil.ContextExecutor) error { + if o == nil || len(*o) == 0 { + return nil + } + + slice := EntrySlice{} + var args []interface{} + for _, obj := range *o { + pkeyArgs := queries.ValuesFromMapping(reflect.Indirect(reflect.ValueOf(obj)), entryPrimaryKeyMapping) + args = append(args, pkeyArgs...) + } + + sql := "SELECT \"entries\".* FROM \"entries\" WHERE " + + strmangle.WhereClauseRepeated(string(dialect.LQ), string(dialect.RQ), 1, entryPrimaryKeyColumns, len(*o)) + + q := queries.Raw(sql, args...) + + err := q.Bind(ctx, exec, &slice) + if err != nil { + return errors.Wrap(err, "record: unable to reload all in EntrySlice") + } + + *o = slice + + return nil +} + +// EntryExists checks if the Entry row exists. 
+func EntryExists(ctx context.Context, exec boil.ContextExecutor, iD int64) (bool, error) { + var exists bool + sql := "select exists(select 1 from \"entries\" where \"id\"=$1 limit 1)" + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, sql) + fmt.Fprintln(boil.DebugWriter, iD) + } + + row := exec.QueryRowContext(ctx, sql, iD) + + err := row.Scan(&exists) + if err != nil { + return false, errors.Wrap(err, "record: unable to check if entries exists") + } + + return exists, nil +} diff --git a/infra/record/entries_test.go b/infra/record/entries_test.go new file mode 100644 index 00000000..f19ecca7 --- /dev/null +++ b/infra/record/entries_test.go @@ -0,0 +1,949 @@ +// Code generated by SQLBoiler (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. +// This file is meant to be re-generated in place and/or deleted at any time. + +package record + +import ( + "bytes" + "context" + "reflect" + "testing" + + "github.com/volatiletech/sqlboiler/boil" + "github.com/volatiletech/sqlboiler/queries" + "github.com/volatiletech/sqlboiler/randomize" + "github.com/volatiletech/sqlboiler/strmangle" +) + +var ( + // Relationships sometimes use the reflection helper queries.Equal/queries.Assign + // so force a package dependency in case they don't. 
+ _ = queries.Equal +) + +func testEntries(t *testing.T) { + t.Parallel() + + query := Entries() + + if query.Query == nil { + t.Error("expected a query, got nothing") + } +} + +func testEntriesDelete(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Entry{} + if err = randomize.Struct(seed, o, entryDBTypes, true, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if rowsAff, err := o.Delete(ctx, tx); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only have deleted one row, but affected:", rowsAff) + } + + count, err := Entries().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 0 { + t.Error("want zero records, got:", count) + } +} + +func testEntriesQueryDeleteAll(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Entry{} + if err = randomize.Struct(seed, o, entryDBTypes, true, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if rowsAff, err := Entries().DeleteAll(ctx, tx); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only have deleted one row, but affected:", rowsAff) + } + + count, err := Entries().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 0 { + t.Error("want zero records, got:", count) + } +} + +func testEntriesSliceDeleteAll(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Entry{} + if err = randomize.Struct(seed, o, entryDBTypes, true, entryColumnsWithDefault...); err != nil { + 
t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice := EntrySlice{o} + + if rowsAff, err := slice.DeleteAll(ctx, tx); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only have deleted one row, but affected:", rowsAff) + } + + count, err := Entries().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 0 { + t.Error("want zero records, got:", count) + } +} + +func testEntriesExists(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Entry{} + if err = randomize.Struct(seed, o, entryDBTypes, true, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + e, err := EntryExists(ctx, tx, o.ID) + if err != nil { + t.Errorf("Unable to check if Entry exists: %s", err) + } + if !e { + t.Errorf("Expected EntryExists to return true, but got false.") + } +} + +func testEntriesFind(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Entry{} + if err = randomize.Struct(seed, o, entryDBTypes, true, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + entryFound, err := FindEntry(ctx, tx, o.ID) + if err != nil { + t.Error(err) + } + + if entryFound == nil { + t.Error("want a record, got nil") + } +} + +func testEntriesBind(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := 
&Entry{} + if err = randomize.Struct(seed, o, entryDBTypes, true, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if err = Entries().Bind(ctx, tx, o); err != nil { + t.Error(err) + } +} + +func testEntriesOne(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Entry{} + if err = randomize.Struct(seed, o, entryDBTypes, true, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if x, err := Entries().One(ctx, tx); err != nil { + t.Error(err) + } else if x == nil { + t.Error("expected to get a non nil record") + } +} + +func testEntriesAll(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + entryOne := &Entry{} + entryTwo := &Entry{} + if err = randomize.Struct(seed, entryOne, entryDBTypes, false, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + if err = randomize.Struct(seed, entryTwo, entryDBTypes, false, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = entryOne.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + if err = entryTwo.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice, err := Entries().All(ctx, tx) + if err != nil { + t.Error(err) + } + + if len(slice) != 2 { + t.Error("want 2 records, got:", len(slice)) + } +} + +func testEntriesCount(t *testing.T) { + 
t.Parallel() + + var err error + seed := randomize.NewSeed() + entryOne := &Entry{} + entryTwo := &Entry{} + if err = randomize.Struct(seed, entryOne, entryDBTypes, false, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + if err = randomize.Struct(seed, entryTwo, entryDBTypes, false, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = entryOne.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + if err = entryTwo.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := Entries().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 2 { + t.Error("want 2 records, got:", count) + } +} + +func entryBeforeInsertHook(ctx context.Context, e boil.ContextExecutor, o *Entry) error { + *o = Entry{} + return nil +} + +func entryAfterInsertHook(ctx context.Context, e boil.ContextExecutor, o *Entry) error { + *o = Entry{} + return nil +} + +func entryAfterSelectHook(ctx context.Context, e boil.ContextExecutor, o *Entry) error { + *o = Entry{} + return nil +} + +func entryBeforeUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Entry) error { + *o = Entry{} + return nil +} + +func entryAfterUpdateHook(ctx context.Context, e boil.ContextExecutor, o *Entry) error { + *o = Entry{} + return nil +} + +func entryBeforeDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Entry) error { + *o = Entry{} + return nil +} + +func entryAfterDeleteHook(ctx context.Context, e boil.ContextExecutor, o *Entry) error { + *o = Entry{} + return nil +} + +func entryBeforeUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Entry) error { + *o = Entry{} + return nil +} + +func entryAfterUpsertHook(ctx context.Context, e boil.ContextExecutor, o *Entry) error { + *o = Entry{} + return nil +} + +func 
testEntriesHooks(t *testing.T) { + t.Parallel() + + var err error + + ctx := context.Background() + empty := &Entry{} + o := &Entry{} + + seed := randomize.NewSeed() + if err = randomize.Struct(seed, o, entryDBTypes, false); err != nil { + t.Errorf("Unable to randomize Entry object: %s", err) + } + + AddEntryHook(boil.BeforeInsertHook, entryBeforeInsertHook) + if err = o.doBeforeInsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doBeforeInsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeInsertHook function to empty object, but got: %#v", o) + } + entryBeforeInsertHooks = []EntryHook{} + + AddEntryHook(boil.AfterInsertHook, entryAfterInsertHook) + if err = o.doAfterInsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterInsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterInsertHook function to empty object, but got: %#v", o) + } + entryAfterInsertHooks = []EntryHook{} + + AddEntryHook(boil.AfterSelectHook, entryAfterSelectHook) + if err = o.doAfterSelectHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterSelectHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterSelectHook function to empty object, but got: %#v", o) + } + entryAfterSelectHooks = []EntryHook{} + + AddEntryHook(boil.BeforeUpdateHook, entryBeforeUpdateHook) + if err = o.doBeforeUpdateHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doBeforeUpdateHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeUpdateHook function to empty object, but got: %#v", o) + } + entryBeforeUpdateHooks = []EntryHook{} + + AddEntryHook(boil.AfterUpdateHook, entryAfterUpdateHook) + if err = o.doAfterUpdateHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterUpdateHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterUpdateHook function to empty object, but got: %#v", o) + } + 
entryAfterUpdateHooks = []EntryHook{} + + AddEntryHook(boil.BeforeDeleteHook, entryBeforeDeleteHook) + if err = o.doBeforeDeleteHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doBeforeDeleteHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeDeleteHook function to empty object, but got: %#v", o) + } + entryBeforeDeleteHooks = []EntryHook{} + + AddEntryHook(boil.AfterDeleteHook, entryAfterDeleteHook) + if err = o.doAfterDeleteHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterDeleteHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterDeleteHook function to empty object, but got: %#v", o) + } + entryAfterDeleteHooks = []EntryHook{} + + AddEntryHook(boil.BeforeUpsertHook, entryBeforeUpsertHook) + if err = o.doBeforeUpsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doBeforeUpsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected BeforeUpsertHook function to empty object, but got: %#v", o) + } + entryBeforeUpsertHooks = []EntryHook{} + + AddEntryHook(boil.AfterUpsertHook, entryAfterUpsertHook) + if err = o.doAfterUpsertHooks(ctx, nil); err != nil { + t.Errorf("Unable to execute doAfterUpsertHooks: %s", err) + } + if !reflect.DeepEqual(o, empty) { + t.Errorf("Expected AfterUpsertHook function to empty object, but got: %#v", o) + } + entryAfterUpsertHooks = []EntryHook{} +} + +func testEntriesInsert(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Entry{} + if err = randomize.Struct(seed, o, entryDBTypes, true, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := Entries().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + 
t.Error("want one record, got:", count) + } +} + +func testEntriesInsertWhitelist(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Entry{} + if err = randomize.Struct(seed, o, entryDBTypes, true); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Whitelist(entryColumnsWithoutDefault...)); err != nil { + t.Error(err) + } + + count, err := Entries().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + t.Error("want one record, got:", count) + } +} + +func testEntryToOneBlogUsingBlog(t *testing.T) { + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + + var local Entry + var foreign Blog + + seed := randomize.NewSeed() + if err := randomize.Struct(seed, &local, entryDBTypes, false, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + if err := randomize.Struct(seed, &foreign, blogDBTypes, false, blogColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Blog struct: %s", err) + } + + if err := foreign.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + local.BlogID = foreign.ID + if err := local.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + check, err := local.Blog().One(ctx, tx) + if err != nil { + t.Fatal(err) + } + + if check.ID != foreign.ID { + t.Errorf("want: %v, got %v", foreign.ID, check.ID) + } + + slice := EntrySlice{&local} + if err = local.L.LoadBlog(ctx, tx, false, (*[]*Entry)(&slice), nil); err != nil { + t.Fatal(err) + } + if local.R.Blog == nil { + t.Error("struct should have been eager loaded") + } + + local.R.Blog = nil + if err = local.L.LoadBlog(ctx, tx, true, &local, nil); err != nil { + t.Fatal(err) + } + if local.R.Blog == nil { + t.Error("struct should have been eager 
loaded") + } +} + +func testEntryToOneUserUsingAuthor(t *testing.T) { + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + + var local Entry + var foreign User + + seed := randomize.NewSeed() + if err := randomize.Struct(seed, &local, entryDBTypes, false, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + if err := randomize.Struct(seed, &foreign, userDBTypes, false, userColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize User struct: %s", err) + } + + if err := foreign.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + local.AuthorID = foreign.ID + if err := local.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + check, err := local.Author().One(ctx, tx) + if err != nil { + t.Fatal(err) + } + + if check.ID != foreign.ID { + t.Errorf("want: %v, got %v", foreign.ID, check.ID) + } + + slice := EntrySlice{&local} + if err = local.L.LoadAuthor(ctx, tx, false, (*[]*Entry)(&slice), nil); err != nil { + t.Fatal(err) + } + if local.R.Author == nil { + t.Error("struct should have been eager loaded") + } + + local.R.Author = nil + if err = local.L.LoadAuthor(ctx, tx, true, &local, nil); err != nil { + t.Fatal(err) + } + if local.R.Author == nil { + t.Error("struct should have been eager loaded") + } +} + +func testEntryToOneSetOpBlogUsingBlog(t *testing.T) { + var err error + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + + var a Entry + var b, c Blog + + seed := randomize.NewSeed() + if err = randomize.Struct(seed, &a, entryDBTypes, false, strmangle.SetComplement(entryPrimaryKeyColumns, entryColumnsWithoutDefault)...); err != nil { + t.Fatal(err) + } + if err = randomize.Struct(seed, &b, blogDBTypes, false, strmangle.SetComplement(blogPrimaryKeyColumns, blogColumnsWithoutDefault)...); err != nil { + t.Fatal(err) + } + if err = randomize.Struct(seed, &c, 
blogDBTypes, false, strmangle.SetComplement(blogPrimaryKeyColumns, blogColumnsWithoutDefault)...); err != nil { + t.Fatal(err) + } + + if err := a.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + if err = b.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + for i, x := range []*Blog{&b, &c} { + err = a.SetBlog(ctx, tx, i != 0, x) + if err != nil { + t.Fatal(err) + } + + if a.R.Blog != x { + t.Error("relationship struct not set to correct value") + } + + if x.R.Entries[0] != &a { + t.Error("failed to append to foreign relationship struct") + } + if a.BlogID != x.ID { + t.Error("foreign key was wrong value", a.BlogID) + } + + zero := reflect.Zero(reflect.TypeOf(a.BlogID)) + reflect.Indirect(reflect.ValueOf(&a.BlogID)).Set(zero) + + if err = a.Reload(ctx, tx); err != nil { + t.Fatal("failed to reload", err) + } + + if a.BlogID != x.ID { + t.Error("foreign key was wrong value", a.BlogID, x.ID) + } + } +} +func testEntryToOneSetOpUserUsingAuthor(t *testing.T) { + var err error + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + + var a Entry + var b, c User + + seed := randomize.NewSeed() + if err = randomize.Struct(seed, &a, entryDBTypes, false, strmangle.SetComplement(entryPrimaryKeyColumns, entryColumnsWithoutDefault)...); err != nil { + t.Fatal(err) + } + if err = randomize.Struct(seed, &b, userDBTypes, false, strmangle.SetComplement(userPrimaryKeyColumns, userColumnsWithoutDefault)...); err != nil { + t.Fatal(err) + } + if err = randomize.Struct(seed, &c, userDBTypes, false, strmangle.SetComplement(userPrimaryKeyColumns, userColumnsWithoutDefault)...); err != nil { + t.Fatal(err) + } + + if err := a.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + if err = b.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + for i, x := range []*User{&b, &c} { + err = a.SetAuthor(ctx, tx, i != 0, x) + if err != nil { + t.Fatal(err) + } + + if a.R.Author != x { 
+ t.Error("relationship struct not set to correct value") + } + + if x.R.AuthorEntries[0] != &a { + t.Error("failed to append to foreign relationship struct") + } + if a.AuthorID != x.ID { + t.Error("foreign key was wrong value", a.AuthorID) + } + + zero := reflect.Zero(reflect.TypeOf(a.AuthorID)) + reflect.Indirect(reflect.ValueOf(&a.AuthorID)).Set(zero) + + if err = a.Reload(ctx, tx); err != nil { + t.Fatal("failed to reload", err) + } + + if a.AuthorID != x.ID { + t.Error("foreign key was wrong value", a.AuthorID, x.ID) + } + } +} + +func testEntriesReload(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Entry{} + if err = randomize.Struct(seed, o, entryDBTypes, true, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + if err = o.Reload(ctx, tx); err != nil { + t.Error(err) + } +} + +func testEntriesReloadAll(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Entry{} + if err = randomize.Struct(seed, o, entryDBTypes, true, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice := EntrySlice{o} + + if err = slice.ReloadAll(ctx, tx); err != nil { + t.Error(err) + } +} + +func testEntriesSelect(t *testing.T) { + t.Parallel() + + seed := randomize.NewSeed() + var err error + o := &Entry{} + if err = randomize.Struct(seed, o, entryDBTypes, true, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + 
defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + slice, err := Entries().All(ctx, tx) + if err != nil { + t.Error(err) + } + + if len(slice) != 1 { + t.Error("want one record, got:", len(slice)) + } +} + +var ( + entryDBTypes = map[string]string{`ID`: `bigint`, `Title`: `character varying`, `Description`: `character varying`, `Content`: `text`, `Link`: `character varying`, `AuthorID`: `bigint`, `GUID`: `character varying`, `BlogID`: `bigint`, `CreatedAt`: `timestamp without time zone`, `UpdatedAt`: `timestamp without time zone`, `ImageURL`: `character varying`, `PublishedAt`: `timestamp without time zone`} + _ = bytes.MinRead +) + +func testEntriesUpdate(t *testing.T) { + t.Parallel() + + if 0 == len(entryPrimaryKeyColumns) { + t.Skip("Skipping table with no primary key columns") + } + if len(entryColumns) == len(entryPrimaryKeyColumns) { + t.Skip("Skipping table with only primary key columns") + } + + seed := randomize.NewSeed() + var err error + o := &Entry{} + if err = randomize.Struct(seed, o, entryDBTypes, true, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := Entries().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + t.Error("want one record, got:", count) + } + + if err = randomize.Struct(seed, o, entryDBTypes, true, entryPrimaryKeyColumns...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + if rowsAff, err := o.Update(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("should only affect one row but affected", rowsAff) + } +} + +func testEntriesSliceUpdateAll(t *testing.T) { + t.Parallel() + + if len(entryColumns) == len(entryPrimaryKeyColumns) { + 
t.Skip("Skipping table with only primary key columns") + } + + seed := randomize.NewSeed() + var err error + o := &Entry{} + if err = randomize.Struct(seed, o, entryDBTypes, true, entryColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Insert(ctx, tx, boil.Infer()); err != nil { + t.Error(err) + } + + count, err := Entries().Count(ctx, tx) + if err != nil { + t.Error(err) + } + + if count != 1 { + t.Error("want one record, got:", count) + } + + if err = randomize.Struct(seed, o, entryDBTypes, true, entryPrimaryKeyColumns...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + // Remove Primary keys and unique columns from what we plan to update + var fields []string + if strmangle.StringSliceMatch(entryColumns, entryPrimaryKeyColumns) { + fields = entryColumns + } else { + fields = strmangle.SetComplement( + entryColumns, + entryPrimaryKeyColumns, + ) + } + + value := reflect.Indirect(reflect.ValueOf(o)) + typ := reflect.TypeOf(o).Elem() + n := typ.NumField() + + updateMap := M{} + for _, col := range fields { + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.Tag.Get("boil") == col { + updateMap[col] = value.Field(i).Interface() + } + } + } + + slice := EntrySlice{o} + if rowsAff, err := slice.UpdateAll(ctx, tx, updateMap); err != nil { + t.Error(err) + } else if rowsAff != 1 { + t.Error("wanted one record updated but got", rowsAff) + } +} + +func testEntriesUpsert(t *testing.T) { + t.Parallel() + + if len(entryColumns) == len(entryPrimaryKeyColumns) { + t.Skip("Skipping table with only primary key columns") + } + + seed := randomize.NewSeed() + var err error + // Attempt the INSERT side of an UPSERT + o := Entry{} + if err = randomize.Struct(seed, &o, entryDBTypes, true); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + ctx := 
context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + if err = o.Upsert(ctx, tx, false, nil, boil.Infer(), boil.Infer()); err != nil { + t.Errorf("Unable to upsert Entry: %s", err) + } + + count, err := Entries().Count(ctx, tx) + if err != nil { + t.Error(err) + } + if count != 1 { + t.Error("want one record, got:", count) + } + + // Attempt the UPDATE side of an UPSERT + if err = randomize.Struct(seed, &o, entryDBTypes, false, entryPrimaryKeyColumns...); err != nil { + t.Errorf("Unable to randomize Entry struct: %s", err) + } + + if err = o.Upsert(ctx, tx, true, nil, boil.Infer(), boil.Infer()); err != nil { + t.Errorf("Unable to upsert Entry: %s", err) + } + + count, err = Entries().Count(ctx, tx) + if err != nil { + t.Error(err) + } + if count != 1 { + t.Error("want one record, got:", count) + } +} diff --git a/infra/record/profiles.go b/infra/record/profiles.go index b3595beb..2b2f7e00 100644 --- a/infra/record/profiles.go +++ b/infra/record/profiles.go @@ -155,27 +155,6 @@ func (w whereHelpernull_Int) GTE(x null.Int) qm.QueryMod { return qmhelper.Where(w.field, qmhelper.GTE, x) } -type whereHelpertime_Time struct{ field string } - -func (w whereHelpertime_Time) EQ(x time.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.EQ, x) -} -func (w whereHelpertime_Time) NEQ(x time.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.NEQ, x) -} -func (w whereHelpertime_Time) LT(x time.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LT, x) -} -func (w whereHelpertime_Time) LTE(x time.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.LTE, x) -} -func (w whereHelpertime_Time) GT(x time.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GT, x) -} -func (w whereHelpertime_Time) GTE(x time.Time) qm.QueryMod { - return qmhelper.Where(w.field, qmhelper.GTE, x) -} - var ProfileWhere = struct { ID whereHelperint64 Description whereHelperstring diff --git 
a/infra/record/psql_suites_test.go b/infra/record/psql_suites_test.go index ee97d365..6a097200 100644 --- a/infra/record/psql_suites_test.go +++ b/infra/record/psql_suites_test.go @@ -6,8 +6,12 @@ package record import "testing" func TestUpsert(t *testing.T) { + t.Run("Blogs", testBlogsUpsert) + t.Run("Departments", testDepartmentsUpsert) + t.Run("Entries", testEntriesUpsert) + t.Run("Profiles", testProfilesUpsert) t.Run("Roles", testRolesUpsert) diff --git a/infra/record/users.go b/infra/record/users.go index 089ed652..09aef713 100644 --- a/infra/record/users.go +++ b/infra/record/users.go @@ -86,14 +86,20 @@ var UserWhere = struct { // UserRels is where relationship names are stored. var UserRels = struct { - Profile string + Profile string + Blogs string + AuthorEntries string }{ - Profile: "Profile", + Profile: "Profile", + Blogs: "Blogs", + AuthorEntries: "AuthorEntries", } // userR is where relationships are stored. type userR struct { - Profile *Profile + Profile *Profile + Blogs BlogSlice + AuthorEntries EntrySlice } // NewStruct creates a new relationship struct @@ -400,6 +406,48 @@ func (o *User) Profile(mods ...qm.QueryMod) profileQuery { return query } +// Blogs retrieves all the blog's Blogs with an executor. +func (o *User) Blogs(mods ...qm.QueryMod) blogQuery { + var queryMods []qm.QueryMod + if len(mods) != 0 { + queryMods = append(queryMods, mods...) + } + + queryMods = append(queryMods, + qm.Where("\"blogs\".\"user_id\"=?", o.ID), + ) + + query := Blogs(queryMods...) + queries.SetFrom(query.Query, "\"blogs\"") + + if len(queries.GetSelect(query.Query)) == 0 { + queries.SetSelect(query.Query, []string{"\"blogs\".*"}) + } + + return query +} + +// AuthorEntries retrieves all the entry's Entries with an executor via author_id column. +func (o *User) AuthorEntries(mods ...qm.QueryMod) entryQuery { + var queryMods []qm.QueryMod + if len(mods) != 0 { + queryMods = append(queryMods, mods...) 
+ } + + queryMods = append(queryMods, + qm.Where("\"entries\".\"author_id\"=?", o.ID), + ) + + query := Entries(queryMods...) + queries.SetFrom(query.Query, "\"entries\"") + + if len(queries.GetSelect(query.Query)) == 0 { + queries.SetSelect(query.Query, []string{"\"entries\".*"}) + } + + return query +} + // LoadProfile allows an eager lookup of values, cached into the // loaded structs of the objects. This is for an N-1 relationship. func (userL) LoadProfile(ctx context.Context, e boil.ContextExecutor, singular bool, maybeUser interface{}, mods queries.Applicator) error { @@ -505,6 +553,196 @@ func (userL) LoadProfile(ctx context.Context, e boil.ContextExecutor, singular b return nil } +// LoadBlogs allows an eager lookup of values, cached into the +// loaded structs of the objects. This is for a 1-M or N-M relationship. +func (userL) LoadBlogs(ctx context.Context, e boil.ContextExecutor, singular bool, maybeUser interface{}, mods queries.Applicator) error { + var slice []*User + var object *User + + if singular { + object = maybeUser.(*User) + } else { + slice = *maybeUser.(*[]*User) + } + + args := make([]interface{}, 0, 1) + if singular { + if object.R == nil { + object.R = &userR{} + } + args = append(args, object.ID) + } else { + Outer: + for _, obj := range slice { + if obj.R == nil { + obj.R = &userR{} + } + + for _, a := range args { + if a == obj.ID { + continue Outer + } + } + + args = append(args, obj.ID) + } + } + + if len(args) == 0 { + return nil + } + + query := NewQuery(qm.From(`blogs`), qm.WhereIn(`user_id in ?`, args...)) + if mods != nil { + mods.Apply(query) + } + + results, err := query.QueryContext(ctx, e) + if err != nil { + return errors.Wrap(err, "failed to eager load blogs") + } + + var resultSlice []*Blog + if err = queries.Bind(results, &resultSlice); err != nil { + return errors.Wrap(err, "failed to bind eager loaded slice blogs") + } + + if err = results.Close(); err != nil { + return errors.Wrap(err, "failed to close results in 
eager load on blogs") + } + if err = results.Err(); err != nil { + return errors.Wrap(err, "error occurred during iteration of eager loaded relations for blogs") + } + + if len(blogAfterSelectHooks) != 0 { + for _, obj := range resultSlice { + if err := obj.doAfterSelectHooks(ctx, e); err != nil { + return err + } + } + } + if singular { + object.R.Blogs = resultSlice + for _, foreign := range resultSlice { + if foreign.R == nil { + foreign.R = &blogR{} + } + foreign.R.User = object + } + return nil + } + + for _, foreign := range resultSlice { + for _, local := range slice { + if local.ID == foreign.UserID { + local.R.Blogs = append(local.R.Blogs, foreign) + if foreign.R == nil { + foreign.R = &blogR{} + } + foreign.R.User = local + break + } + } + } + + return nil +} + +// LoadAuthorEntries allows an eager lookup of values, cached into the +// loaded structs of the objects. This is for a 1-M or N-M relationship. +func (userL) LoadAuthorEntries(ctx context.Context, e boil.ContextExecutor, singular bool, maybeUser interface{}, mods queries.Applicator) error { + var slice []*User + var object *User + + if singular { + object = maybeUser.(*User) + } else { + slice = *maybeUser.(*[]*User) + } + + args := make([]interface{}, 0, 1) + if singular { + if object.R == nil { + object.R = &userR{} + } + args = append(args, object.ID) + } else { + Outer: + for _, obj := range slice { + if obj.R == nil { + obj.R = &userR{} + } + + for _, a := range args { + if a == obj.ID { + continue Outer + } + } + + args = append(args, obj.ID) + } + } + + if len(args) == 0 { + return nil + } + + query := NewQuery(qm.From(`entries`), qm.WhereIn(`author_id in ?`, args...)) + if mods != nil { + mods.Apply(query) + } + + results, err := query.QueryContext(ctx, e) + if err != nil { + return errors.Wrap(err, "failed to eager load entries") + } + + var resultSlice []*Entry + if err = queries.Bind(results, &resultSlice); err != nil { + return errors.Wrap(err, "failed to bind eager loaded slice 
entries") + } + + if err = results.Close(); err != nil { + return errors.Wrap(err, "failed to close results in eager load on entries") + } + if err = results.Err(); err != nil { + return errors.Wrap(err, "error occurred during iteration of eager loaded relations for entries") + } + + if len(entryAfterSelectHooks) != 0 { + for _, obj := range resultSlice { + if err := obj.doAfterSelectHooks(ctx, e); err != nil { + return err + } + } + } + if singular { + object.R.AuthorEntries = resultSlice + for _, foreign := range resultSlice { + if foreign.R == nil { + foreign.R = &entryR{} + } + foreign.R.Author = object + } + return nil + } + + for _, foreign := range resultSlice { + for _, local := range slice { + if local.ID == foreign.AuthorID { + local.R.AuthorEntries = append(local.R.AuthorEntries, foreign) + if foreign.R == nil { + foreign.R = &entryR{} + } + foreign.R.Author = local + break + } + } + } + + return nil +} + // SetProfile of the user to the related item. // Sets o.R.Profile to related. // Adds o to related.R.Users. @@ -583,6 +821,112 @@ func (o *User) RemoveProfile(ctx context.Context, exec boil.ContextExecutor, rel return nil } +// AddBlogs adds the given related objects to the existing relationships +// of the user, optionally inserting them as new records. +// Appends related to o.R.Blogs. +// Sets related.R.User appropriately. 
+func (o *User) AddBlogs(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Blog) error { + var err error + for _, rel := range related { + if insert { + rel.UserID = o.ID + if err = rel.Insert(ctx, exec, boil.Infer()); err != nil { + return errors.Wrap(err, "failed to insert into foreign table") + } + } else { + updateQuery := fmt.Sprintf( + "UPDATE \"blogs\" SET %s WHERE %s", + strmangle.SetParamNames("\"", "\"", 1, []string{"user_id"}), + strmangle.WhereClause("\"", "\"", 2, blogPrimaryKeyColumns), + ) + values := []interface{}{o.ID, rel.ID} + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, updateQuery) + fmt.Fprintln(boil.DebugWriter, values) + } + + if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil { + return errors.Wrap(err, "failed to update foreign table") + } + + rel.UserID = o.ID + } + } + + if o.R == nil { + o.R = &userR{ + Blogs: related, + } + } else { + o.R.Blogs = append(o.R.Blogs, related...) + } + + for _, rel := range related { + if rel.R == nil { + rel.R = &blogR{ + User: o, + } + } else { + rel.R.User = o + } + } + return nil +} + +// AddAuthorEntries adds the given related objects to the existing relationships +// of the user, optionally inserting them as new records. +// Appends related to o.R.AuthorEntries. +// Sets related.R.Author appropriately. 
+func (o *User) AddAuthorEntries(ctx context.Context, exec boil.ContextExecutor, insert bool, related ...*Entry) error { + var err error + for _, rel := range related { + if insert { + rel.AuthorID = o.ID + if err = rel.Insert(ctx, exec, boil.Infer()); err != nil { + return errors.Wrap(err, "failed to insert into foreign table") + } + } else { + updateQuery := fmt.Sprintf( + "UPDATE \"entries\" SET %s WHERE %s", + strmangle.SetParamNames("\"", "\"", 1, []string{"author_id"}), + strmangle.WhereClause("\"", "\"", 2, entryPrimaryKeyColumns), + ) + values := []interface{}{o.ID, rel.ID} + + if boil.DebugMode { + fmt.Fprintln(boil.DebugWriter, updateQuery) + fmt.Fprintln(boil.DebugWriter, values) + } + + if _, err = exec.ExecContext(ctx, updateQuery, values...); err != nil { + return errors.Wrap(err, "failed to update foreign table") + } + + rel.AuthorID = o.ID + } + } + + if o.R == nil { + o.R = &userR{ + AuthorEntries: related, + } + } else { + o.R.AuthorEntries = append(o.R.AuthorEntries, related...) + } + + for _, rel := range related { + if rel.R == nil { + rel.R = &entryR{ + Author: o, + } + } else { + rel.R.Author = o + } + } + return nil +} + // Users retrieves all the records using an executor. 
func Users(mods ...qm.QueryMod) userQuery { mods = append(mods, qm.From("\"users\"")) diff --git a/infra/record/users_test.go b/infra/record/users_test.go index c968cc69..aed8a081 100644 --- a/infra/record/users_test.go +++ b/infra/record/users_test.go @@ -494,6 +494,312 @@ func testUsersInsertWhitelist(t *testing.T) { } } +func testUserToManyBlogs(t *testing.T) { + var err error + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + + var a User + var b, c Blog + + seed := randomize.NewSeed() + if err = randomize.Struct(seed, &a, userDBTypes, true, userColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize User struct: %s", err) + } + + if err := a.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + if err = randomize.Struct(seed, &b, blogDBTypes, false, blogColumnsWithDefault...); err != nil { + t.Fatal(err) + } + if err = randomize.Struct(seed, &c, blogDBTypes, false, blogColumnsWithDefault...); err != nil { + t.Fatal(err) + } + + b.UserID = a.ID + c.UserID = a.ID + + if err = b.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + if err = c.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + check, err := a.Blogs().All(ctx, tx) + if err != nil { + t.Fatal(err) + } + + bFound, cFound := false, false + for _, v := range check { + if v.UserID == b.UserID { + bFound = true + } + if v.UserID == c.UserID { + cFound = true + } + } + + if !bFound { + t.Error("expected to find b") + } + if !cFound { + t.Error("expected to find c") + } + + slice := UserSlice{&a} + if err = a.L.LoadBlogs(ctx, tx, false, (*[]*User)(&slice), nil); err != nil { + t.Fatal(err) + } + if got := len(a.R.Blogs); got != 2 { + t.Error("number of eager loaded records wrong, got:", got) + } + + a.R.Blogs = nil + if err = a.L.LoadBlogs(ctx, tx, true, &a, nil); err != nil { + t.Fatal(err) + } + if got := len(a.R.Blogs); got != 2 { + t.Error("number of eager loaded records wrong, got:", got) + 
} + + if t.Failed() { + t.Logf("%#v", check) + } +} + +func testUserToManyAuthorEntries(t *testing.T) { + var err error + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + + var a User + var b, c Entry + + seed := randomize.NewSeed() + if err = randomize.Struct(seed, &a, userDBTypes, true, userColumnsWithDefault...); err != nil { + t.Errorf("Unable to randomize User struct: %s", err) + } + + if err := a.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + if err = randomize.Struct(seed, &b, entryDBTypes, false, entryColumnsWithDefault...); err != nil { + t.Fatal(err) + } + if err = randomize.Struct(seed, &c, entryDBTypes, false, entryColumnsWithDefault...); err != nil { + t.Fatal(err) + } + + b.AuthorID = a.ID + c.AuthorID = a.ID + + if err = b.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + if err = c.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + check, err := a.AuthorEntries().All(ctx, tx) + if err != nil { + t.Fatal(err) + } + + bFound, cFound := false, false + for _, v := range check { + if v.AuthorID == b.AuthorID { + bFound = true + } + if v.AuthorID == c.AuthorID { + cFound = true + } + } + + if !bFound { + t.Error("expected to find b") + } + if !cFound { + t.Error("expected to find c") + } + + slice := UserSlice{&a} + if err = a.L.LoadAuthorEntries(ctx, tx, false, (*[]*User)(&slice), nil); err != nil { + t.Fatal(err) + } + if got := len(a.R.AuthorEntries); got != 2 { + t.Error("number of eager loaded records wrong, got:", got) + } + + a.R.AuthorEntries = nil + if err = a.L.LoadAuthorEntries(ctx, tx, true, &a, nil); err != nil { + t.Fatal(err) + } + if got := len(a.R.AuthorEntries); got != 2 { + t.Error("number of eager loaded records wrong, got:", got) + } + + if t.Failed() { + t.Logf("%#v", check) + } +} + +func testUserToManyAddOpBlogs(t *testing.T) { + var err error + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer 
func() { _ = tx.Rollback() }() + + var a User + var b, c, d, e Blog + + seed := randomize.NewSeed() + if err = randomize.Struct(seed, &a, userDBTypes, false, strmangle.SetComplement(userPrimaryKeyColumns, userColumnsWithoutDefault)...); err != nil { + t.Fatal(err) + } + foreigners := []*Blog{&b, &c, &d, &e} + for _, x := range foreigners { + if err = randomize.Struct(seed, x, blogDBTypes, false, strmangle.SetComplement(blogPrimaryKeyColumns, blogColumnsWithoutDefault)...); err != nil { + t.Fatal(err) + } + } + + if err := a.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + if err = b.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + if err = c.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + foreignersSplitByInsertion := [][]*Blog{ + {&b, &c}, + {&d, &e}, + } + + for i, x := range foreignersSplitByInsertion { + err = a.AddBlogs(ctx, tx, i != 0, x...) + if err != nil { + t.Fatal(err) + } + + first := x[0] + second := x[1] + + if a.ID != first.UserID { + t.Error("foreign key was wrong value", a.ID, first.UserID) + } + if a.ID != second.UserID { + t.Error("foreign key was wrong value", a.ID, second.UserID) + } + + if first.R.User != &a { + t.Error("relationship was not added properly to the foreign slice") + } + if second.R.User != &a { + t.Error("relationship was not added properly to the foreign slice") + } + + if a.R.Blogs[i*2] != first { + t.Error("relationship struct slice not set to correct value") + } + if a.R.Blogs[i*2+1] != second { + t.Error("relationship struct slice not set to correct value") + } + + count, err := a.Blogs().Count(ctx, tx) + if err != nil { + t.Fatal(err) + } + if want := int64((i + 1) * 2); count != want { + t.Error("want", want, "got", count) + } + } +} +func testUserToManyAddOpAuthorEntries(t *testing.T) { + var err error + + ctx := context.Background() + tx := MustTx(boil.BeginTx(ctx, nil)) + defer func() { _ = tx.Rollback() }() + + var a User + var b, c, d, e Entry + + seed := 
randomize.NewSeed() + if err = randomize.Struct(seed, &a, userDBTypes, false, strmangle.SetComplement(userPrimaryKeyColumns, userColumnsWithoutDefault)...); err != nil { + t.Fatal(err) + } + foreigners := []*Entry{&b, &c, &d, &e} + for _, x := range foreigners { + if err = randomize.Struct(seed, x, entryDBTypes, false, strmangle.SetComplement(entryPrimaryKeyColumns, entryColumnsWithoutDefault)...); err != nil { + t.Fatal(err) + } + } + + if err := a.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + if err = b.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + if err = c.Insert(ctx, tx, boil.Infer()); err != nil { + t.Fatal(err) + } + + foreignersSplitByInsertion := [][]*Entry{ + {&b, &c}, + {&d, &e}, + } + + for i, x := range foreignersSplitByInsertion { + err = a.AddAuthorEntries(ctx, tx, i != 0, x...) + if err != nil { + t.Fatal(err) + } + + first := x[0] + second := x[1] + + if a.ID != first.AuthorID { + t.Error("foreign key was wrong value", a.ID, first.AuthorID) + } + if a.ID != second.AuthorID { + t.Error("foreign key was wrong value", a.ID, second.AuthorID) + } + + if first.R.Author != &a { + t.Error("relationship was not added properly to the foreign slice") + } + if second.R.Author != &a { + t.Error("relationship was not added properly to the foreign slice") + } + + if a.R.AuthorEntries[i*2] != first { + t.Error("relationship struct slice not set to correct value") + } + if a.R.AuthorEntries[i*2+1] != second { + t.Error("relationship struct slice not set to correct value") + } + + count, err := a.AuthorEntries().Count(ctx, tx) + if err != nil { + t.Fatal(err) + } + if want := int64((i + 1) * 2); count != want { + t.Error("want", want, "got", count) + } + } +} func testUserToOneProfileUsingProfile(t *testing.T) { ctx := context.Background() tx := MustTx(boil.BeginTx(ctx, nil)) diff --git a/infra/store/entry/entry_store.go b/infra/store/entry/entry_store.go new file mode 100644 index 00000000..8b7554e9 --- /dev/null +++ 
b/infra/store/entry/entry_store.go @@ -0,0 +1,148 @@ +package entrystore + +import ( + "context" + "crypto/sha256" + "database/sql" + "encoding/base64" + "encoding/binary" + + "github.com/mmcdole/gofeed" + "github.com/pkg/errors" + "github.com/volatiletech/null" + "github.com/volatiletech/sqlboiler/boil" + "github.com/volatiletech/sqlboiler/queries/qm" + + "github.com/ProgrammingLab/prolab-accounts/app/util" + "github.com/ProgrammingLab/prolab-accounts/infra/record" + "github.com/ProgrammingLab/prolab-accounts/infra/store" + "github.com/ProgrammingLab/prolab-accounts/model" +) + +type entryStoreImpl struct { + ctx context.Context + db *sql.DB +} + +// NewEntryStore returns new entry blog store +func NewEntryStore(ctx context.Context, db *sql.DB) store.EntryStore { + return &entryStoreImpl{ + ctx: ctx, + db: db, + } +} + +func (s *entryStoreImpl) ListPublicEntries(maxEntryID int64, limit int) ([]*record.Entry, int64, error) { + mods := []qm.QueryMod{ + qm.Load(record.EntryRels.Author), + qm.Load(record.EntryRels.Blog), + qm.InnerJoin("users on users.id = entries.author_id"), + qm.InnerJoin("profiles on profiles.id = users.profile_id"), + qm.Where("profiles.profile_scope = ?", model.Public), + qm.Where("entries.id <= ?", maxEntryID), + qm.Limit(limit + 1), + qm.OrderBy("entries.id desc"), + } + + e, err := record.Entries(mods...).All(s.ctx, s.db) + if err != nil { + return nil, 0, errors.WithStack(err) + } + + if len(e) <= limit { + return e, 0, nil + } + return e[:limit], e[limit].ID, nil +} + +func (s *entryStoreImpl) CreateEntries(blog *record.Blog, feed *gofeed.Feed) (n int64, err error) { + rev := make([]*gofeed.Item, len(feed.Items)) + for i, item := range feed.Items { + rev[len(rev)-1-i] = item + } + feed.Items = rev + + tx, err := s.db.Begin() + if err != nil { + return 0, errors.WithStack(err) + } + defer func() { + if e := util.ErrorFromRecover(recover()); e != nil { + 
_ = tx.Rollback() + err = e + } + }() + + mods := []qm.QueryMod{ + qm.Select(record.EntryColumns.ID, record.EntryColumns.GUID), + qm.Where("blog_id = ?", blog.ID), + } + entries, err := record.Entries(mods...).All(s.ctx, tx) + if err != nil { + _ = tx.Rollback() + return 0, errors.WithStack(err) + } + + exists := make(map[string]struct{}) + for _, e := range entries { + exists[e.GUID] = struct{}{} + } + + n = 0 + for _, item := range feed.Items { + guid, err := getGUID(blog.ID, item.GUID) + if err != nil { + _ = tx.Rollback() + return 0, err + } + + _, ok := exists[guid] + if ok { + continue + } + + e := &record.Entry{ + Title: item.Title, + Description: item.Description, + Content: item.Content, + Link: item.Link, + AuthorID: blog.UserID, + GUID: guid, + BlogID: blog.ID, + } + if i := item.Image; i != nil { + e.ImageURL = i.URL + } + if t := item.PublishedParsed; t != nil { + e.PublishedAt = null.TimeFrom(t.In(boil.GetLocation())) + } + + err = e.Insert(s.ctx, tx, boil.Infer()) + if err != nil { + _ = tx.Rollback() + return 0, errors.WithStack(err) + } + n++ + } + + err = tx.Commit() + if err != nil { + _ = tx.Rollback() + return 0, errors.WithStack(err) + } + return n, nil +} + +func getGUID(blogID int64, guid string) (string, error) { + h := sha256.New() + err := binary.Write(h, binary.LittleEndian, blogID) + if err != nil { + return "", errors.WithStack(err) + } + _, err = h.Write([]byte(guid)) + if err != nil { + return "", errors.WithStack(err) + } + + return base64.RawURLEncoding.EncodeToString(h.Sum(nil)), nil +} diff --git a/infra/store/entry_store.go b/infra/store/entry_store.go new file mode 100644 index 00000000..cf20e4af --- /dev/null +++ b/infra/store/entry_store.go @@ -0,0 +1,12 @@ +package store + +import ( + "github.com/ProgrammingLab/prolab-accounts/infra/record" + "github.com/mmcdole/gofeed" +) + +// EntryStore accesses entry data +type EntryStore interface { + ListPublicEntries(maxEntryID int64, limit int) ([]*record.Entry, 
int64, error) + CreateEntries(blog *record.Blog, feed *gofeed.Feed) (int64, error) +} diff --git a/infra/store/feed/feed_store.go b/infra/store/feed/feed_store.go new file mode 100644 index 00000000..f7243fe5 --- /dev/null +++ b/infra/store/feed/feed_store.go @@ -0,0 +1,59 @@ +package feedstore + +import ( + "context" + "fmt" + "net/http" + + "github.com/ProgrammingLab/prolab-accounts/infra/store" + "github.com/mmcdole/gofeed" + "github.com/pkg/errors" +) + +type feedStoreImpl struct { + ctx context.Context +} + +// NewFeedStore returns new feed store +func NewFeedStore(ctx context.Context) store.FeedStore { + return &feedStoreImpl{ + ctx: ctx, + } +} + +type feedURLGetter func(blogURL string, cli *http.Client) (feed string, err error) + +var ( + // ErrFeedURLNotFound will be returned when feed url not found + ErrFeedURLNotFound = fmt.Errorf("feed url not found") + + feedURLGetters = []feedURLGetter{ + getMediumFeed, + getFeedURLWithSuffixes, + } +) + +func (s *feedStoreImpl) GetFeedURL(url string) (string, error) { + for _, g := range feedURLGetters { + u, err := g(url, &http.Client{}) + if err == nil { + return u, nil + } + } + + return "", ErrFeedURLNotFound +} + +func (s *feedStoreImpl) IsValidFeedURL(feedURL string) error { + _, err := s.GetFeed(feedURL) + return err +} + +func (s *feedStoreImpl) GetFeed(feedURL string) (*gofeed.Feed, error) { + p := gofeed.NewParser() + f, err := p.ParseURL(feedURL) + if err != nil { + return nil, errors.WithStack(err) + } + return f, nil +} diff --git a/infra/store/feed/feed_url.go b/infra/store/feed/feed_url.go new file mode 100644 index 00000000..6945c02f --- /dev/null +++ b/infra/store/feed/feed_url.go @@ -0,0 +1,101 @@ +package feedstore + +import ( + "net/http" + "strings" + + "github.com/mmcdole/gofeed" +) + +var suffixes = []string{ + // Qiita + "feed.atom", + // note.mu + "atom", + // Hatena blog + "feed", + // Word Press + "?feed=atom", + // Excite blog + "index.xml", +} + 
+type feedURLResult struct { + FeedURL string + Error error +} + +func getFeedURLWithSuffixes(url string, cli *http.Client) (string, error) { + c := make(chan *feedURLResult) + for _, s := range suffixes { + go func(suffix string, cli *http.Client) { + u, err := getFeedURL(url, suffix, cli) + if err == nil { + c <- &feedURLResult{ + FeedURL: u, + Error: nil, + } + return + } + + c <- &feedURLResult{ + FeedURL: "", + Error: err, + } + }(s, cli) + } + + var feed string + err := ErrFeedURLNotFound + for range suffixes { + res := <-c + if err == nil || res.Error != nil { + continue + } + + feed = res.FeedURL + err = res.Error + } + + return feed, err +} + +const ( + mediumPrefix = "https://medium.com/@" +) + +func getMediumFeed(url string, cli *http.Client) (string, error) { + if !strings.HasPrefix(url, mediumPrefix) { + return "", ErrFeedURLNotFound + } + + if url[len(url)-1] == '/' { + url = url[:len(url)-1] + } + name := url[len(mediumPrefix):] + feed := "https://medium.com/feed/@" + name + + p := gofeed.NewParser() + p.Client = cli + _, err := p.ParseURL(feed) + if err != nil { + return "", err + } + return feed, nil +} + +func getFeedURL(url, suffix string, cli *http.Client) (string, error) { + if url[len(url)-1] != '/' { + url += "/" + } + + url += suffix + p := gofeed.NewParser() + p.Client = cli + _, err := p.ParseURL(url) + if err != nil { + return "", err + } + + return url, nil +} diff --git a/infra/store/feed_store.go b/infra/store/feed_store.go new file mode 100644 index 00000000..7b33cadb --- /dev/null +++ b/infra/store/feed_store.go @@ -0,0 +1,12 @@ +package store + +import ( + "github.com/mmcdole/gofeed" +) + +// FeedStore provides feed +type FeedStore interface { + GetFeedURL(url string) (string, error) + IsValidFeedURL(url string) error + GetFeed(feedURL string) (*gofeed.Feed, error) +} diff --git a/infra/store/heartbeat.go b/infra/store/heartbeat.go new file mode 100644 index 00000000..0ff237c7 --- /dev/null +++ b/infra/store/heartbeat.go 
@@ -0,0 +1,7 @@ +package store + +// HeartbeatStore accesses worker's heartbeat +type HeartbeatStore interface { + Beat() error + GetHeartbeat() error +} diff --git a/infra/store/heartbeat/heatbeat_store.go b/infra/store/heartbeat/heatbeat_store.go new file mode 100644 index 00000000..7ce07a5e --- /dev/null +++ b/infra/store/heartbeat/heatbeat_store.go @@ -0,0 +1,42 @@ +package heartbeatstore + +import ( + "context" + "time" + + "github.com/go-redis/redis" + "github.com/pkg/errors" + + "github.com/ProgrammingLab/prolab-accounts/app/config" + "github.com/ProgrammingLab/prolab-accounts/infra/store" +) + +type heartbeatStoreImpl struct { + ctx context.Context + client *redis.Client + cfg *config.Config +} + +// NewHeartbeatStore returns new heartbeat store +func NewHeartbeatStore(ctx context.Context, cli *redis.Client, cfg *config.Config) store.HeartbeatStore { + return &heartbeatStoreImpl{ + ctx: ctx, + client: cli, + cfg: cfg, + } +} + +const key = "heartbeat" + +func (s *heartbeatStoreImpl) Beat() error { + exp := time.Duration(s.cfg.JobIntervalSec) * time.Second * 2 + err := s.client.Set(key, "dokidoki", exp).Err() + if err != nil { + return errors.WithStack(err) + } + return nil +} + +func (s *heartbeatStoreImpl) GetHeartbeat() error { + return errors.WithStack(s.client.Get(key).Err()) +} diff --git a/infra/store/user_blog/user_blog_store.go b/infra/store/user_blog/user_blog_store.go new file mode 100644 index 00000000..2ce414e4 --- /dev/null +++ b/infra/store/user_blog/user_blog_store.go @@ -0,0 +1,122 @@ +package userblogstore + +import ( + "context" + "database/sql" + + "github.com/pkg/errors" + "github.com/volatiletech/sqlboiler/boil" + + "github.com/ProgrammingLab/prolab-accounts/app/util" + "github.com/ProgrammingLab/prolab-accounts/infra/record" + "github.com/ProgrammingLab/prolab-accounts/infra/store" +) + +type userBlogStoreImpl struct { + ctx context.Context + db 
*sql.DB +} + +// NewUserBlogStore returns new user blog store +func NewUserBlogStore(ctx context.Context, db *sql.DB) store.UserBlogStore { + return &userBlogStoreImpl{ + ctx: ctx, + db: db, + } +} + +func (s *userBlogStoreImpl) ListUserBlogs() ([]*record.Blog, error) { + b, err := record.Blogs().All(s.ctx, s.db) + if err != nil { + return nil, errors.WithStack(err) + } + return b, nil +} + +func (s *userBlogStoreImpl) GetUserBlog(blogID int64) (*record.Blog, error) { + b, err := record.FindBlog(s.ctx, s.db, blogID) + if err != nil { + return nil, errors.WithStack(err) + } + return b, nil +} + +func (s *userBlogStoreImpl) CreateUserBlog(blog *record.Blog) error { + blog.ID = 0 + err := blog.Insert(s.ctx, s.db, boil.Infer()) + if err != nil { + return errors.WithStack(err) + } + + return nil +} + +func (s *userBlogStoreImpl) UpdateUserBlog(blog *record.Blog) (err error) { + tx, err := s.db.Begin() + if err != nil { + return errors.WithStack(err) + } + defer func() { + if e := util.ErrorFromRecover(recover()); e != nil { + _ = tx.Rollback() + err = e + } + }() + + exists, err := record.FindBlog(s.ctx, tx, blog.ID) + if err != nil { + _ = tx.Rollback() + return errors.WithStack(err) + } + if exists.UserID != blog.UserID { + _ = tx.Rollback() + return sql.ErrNoRows + } + + _, err = blog.Update(s.ctx, tx, boil.Infer()) + if err != nil { + _ = tx.Rollback() + return errors.WithStack(err) + } + + err = tx.Commit() + if err != nil { + _ = tx.Rollback() + return errors.WithStack(err) + } + + return nil +} + +func (s *userBlogStoreImpl) DeleteUserBlog(blogID int64) (err error) { + tx, err := s.db.Begin() + if err != nil { + return errors.WithStack(err) + } + defer func() { + if e := util.ErrorFromRecover(recover()); e != nil { + _ = tx.Rollback() + err = e + } + }() + + _, err = record.Entries(record.EntryWhere.BlogID.EQ(blogID)).DeleteAll(s.ctx, tx) + if err != nil { + _ = tx.Rollback() + return errors.WithStack(err) + } + + _, err = 
record.Blogs(record.BlogWhere.ID.EQ(blogID)).DeleteAll(s.ctx, tx) + if err != nil { + _ = tx.Rollback() + return errors.WithStack(err) + } + + err = tx.Commit() + if err != nil { + _ = tx.Rollback() + return errors.WithStack(err) + } + + return nil +} diff --git a/infra/store/user_blog_store.go b/infra/store/user_blog_store.go new file mode 100644 index 00000000..ff53ead5 --- /dev/null +++ b/infra/store/user_blog_store.go @@ -0,0 +1,14 @@ +package store + +import ( + "github.com/ProgrammingLab/prolab-accounts/infra/record" +) + +// UserBlogStore accesses users data +type UserBlogStore interface { + ListUserBlogs() ([]*record.Blog, error) + GetUserBlog(blogID int64) (*record.Blog, error) + CreateUserBlog(blog *record.Blog) error + UpdateUserBlog(blog *record.Blog) error + DeleteUserBlog(blogID int64) error +}