MonkeyCode/backend/db/aitask_query.go
// Code generated by ent, DO NOT EDIT.
package db
import (
"context"
"fmt"
"math"
"entgo.io/ent"
"entgo.io/ent/dialect"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"entgo.io/ent/schema/field"
"github.com/chaitin/MonkeyCode/backend/db/aitask"
"github.com/chaitin/MonkeyCode/backend/db/predicate"
"github.com/google/uuid"
)
// AITaskQuery is the builder for querying AITask entities.
type AITaskQuery struct {
config
ctx *QueryContext
order []aitask.OrderOption
inters []Interceptor
predicates []predicate.AITask
modifiers []func(*sql.Selector)
// intermediate query (i.e. traversal path).
sql *sql.Selector
path func(context.Context) (*sql.Selector, error)
}
// Where adds a new predicate for the AITaskQuery builder.
func (atq *AITaskQuery) Where(ps ...predicate.AITask) *AITaskQuery {
atq.predicates = append(atq.predicates, ps...)
return atq
}
// Limit the number of records to be returned by this query.
func (atq *AITaskQuery) Limit(limit int) *AITaskQuery {
atq.ctx.Limit = &limit
return atq
}
// Offset to start from.
func (atq *AITaskQuery) Offset(offset int) *AITaskQuery {
atq.ctx.Offset = &offset
return atq
}
// Unique configures the query builder to filter out duplicate records in the results.
// By default, uniqueness is enabled, and it can be disabled using this method.
func (atq *AITaskQuery) Unique(unique bool) *AITaskQuery {
atq.ctx.Unique = &unique
return atq
}
// Order specifies how the records should be ordered.
func (atq *AITaskQuery) Order(o ...aitask.OrderOption) *AITaskQuery {
atq.order = append(atq.order, o...)
return atq
}
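// listTasksPage is an illustrative sketch, not part of the generated API: it shows how
// the builder methods above (Where, Order, Limit, Offset) compose into a typical paged
// query. aitask.EmployeeID is assumed to be the generated predicate for the employee_id
// field referenced in the GroupBy example below; aitask.ByID and sql.OrderDesc are the
// standard ordering helpers.
func listTasksPage(ctx context.Context, client *Client, employeeID uuid.UUID, page, size int) ([]*AITask, error) {
	return client.AITask.Query().
		Where(aitask.EmployeeID(employeeID)). // filter by owner (assumed predicate helper)
		Order(aitask.ByID(sql.OrderDesc())).  // newest IDs first
		Limit(size).
		Offset(page * size).
		All(ctx)
}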
// First returns the first AITask entity from the query.
// Returns a *NotFoundError when no AITask was found.
func (atq *AITaskQuery) First(ctx context.Context) (*AITask, error) {
nodes, err := atq.Limit(1).All(setContextOp(ctx, atq.ctx, ent.OpQueryFirst))
if err != nil {
return nil, err
}
if len(nodes) == 0 {
return nil, &NotFoundError{aitask.Label}
}
return nodes[0], nil
}
// FirstX is like First, but panics if an error occurs.
func (atq *AITaskQuery) FirstX(ctx context.Context) *AITask {
node, err := atq.First(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return node
}
// FirstID returns the first AITask ID from the query.
// Returns a *NotFoundError when no AITask ID was found.
func (atq *AITaskQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
if ids, err = atq.Limit(1).IDs(setContextOp(ctx, atq.ctx, ent.OpQueryFirstID)); err != nil {
return
}
if len(ids) == 0 {
err = &NotFoundError{aitask.Label}
return
}
return ids[0], nil
}
// FirstIDX is like FirstID, but panics if an error occurs.
func (atq *AITaskQuery) FirstIDX(ctx context.Context) uuid.UUID {
id, err := atq.FirstID(ctx)
if err != nil && !IsNotFound(err) {
panic(err)
}
return id
}
// Only returns a single AITask entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one AITask entity is found.
// Returns a *NotFoundError when no AITask entities are found.
func (atq *AITaskQuery) Only(ctx context.Context) (*AITask, error) {
nodes, err := atq.Limit(2).All(setContextOp(ctx, atq.ctx, ent.OpQueryOnly))
if err != nil {
return nil, err
}
switch len(nodes) {
case 1:
return nodes[0], nil
case 0:
return nil, &NotFoundError{aitask.Label}
default:
return nil, &NotSingularError{aitask.Label}
}
}
// OnlyX is like Only, but panics if an error occurs.
func (atq *AITaskQuery) OnlyX(ctx context.Context) *AITask {
node, err := atq.Only(ctx)
if err != nil {
panic(err)
}
return node
}
// OnlyID is like Only, but returns the only AITask ID in the query.
// Returns a *NotSingularError when more than one AITask ID is found.
// Returns a *NotFoundError when no entities are found.
func (atq *AITaskQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
var ids []uuid.UUID
if ids, err = atq.Limit(2).IDs(setContextOp(ctx, atq.ctx, ent.OpQueryOnlyID)); err != nil {
return
}
switch len(ids) {
case 1:
id = ids[0]
case 0:
err = &NotFoundError{aitask.Label}
default:
err = &NotSingularError{aitask.Label}
}
return
}
// OnlyIDX is like OnlyID, but panics if an error occurs.
func (atq *AITaskQuery) OnlyIDX(ctx context.Context) uuid.UUID {
id, err := atq.OnlyID(ctx)
if err != nil {
panic(err)
}
return id
}
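// findTask is an illustrative sketch, not part of the generated API: First returns a
// *NotFoundError when no row matches, so callers typically branch on IsNotFound rather
// than treating the miss as a failure. Only behaves the same way but additionally
// returns a *NotSingularError when more than one row matches the predicates.
func findTask(ctx context.Context, client *Client, id uuid.UUID) (*AITask, error) {
	t, err := client.AITask.Query().Where(aitask.ID(id)).First(ctx)
	if IsNotFound(err) {
		return nil, nil // "no such task" is a nil result for this caller, not an error
	}
	return t, err
}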
// All executes the query and returns a list of AITasks.
func (atq *AITaskQuery) All(ctx context.Context) ([]*AITask, error) {
ctx = setContextOp(ctx, atq.ctx, ent.OpQueryAll)
if err := atq.prepareQuery(ctx); err != nil {
return nil, err
}
qr := querierAll[[]*AITask, *AITaskQuery]()
return withInterceptors[[]*AITask](ctx, atq, qr, atq.inters)
}
// AllX is like All, but panics if an error occurs.
func (atq *AITaskQuery) AllX(ctx context.Context) []*AITask {
nodes, err := atq.All(ctx)
if err != nil {
panic(err)
}
return nodes
}
// IDs executes the query and returns a list of AITask IDs.
func (atq *AITaskQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
if atq.ctx.Unique == nil && atq.path != nil {
atq.Unique(true)
}
ctx = setContextOp(ctx, atq.ctx, ent.OpQueryIDs)
if err = atq.Select(aitask.FieldID).Scan(ctx, &ids); err != nil {
return nil, err
}
return ids, nil
}
// IDsX is like IDs, but panics if an error occurs.
func (atq *AITaskQuery) IDsX(ctx context.Context) []uuid.UUID {
ids, err := atq.IDs(ctx)
if err != nil {
panic(err)
}
return ids
}
// Count returns the count of the given query.
func (atq *AITaskQuery) Count(ctx context.Context) (int, error) {
ctx = setContextOp(ctx, atq.ctx, ent.OpQueryCount)
if err := atq.prepareQuery(ctx); err != nil {
return 0, err
}
return withInterceptors[int](ctx, atq, querierCount[*AITaskQuery](), atq.inters)
}
// CountX is like Count, but panics if an error occurs.
func (atq *AITaskQuery) CountX(ctx context.Context) int {
count, err := atq.Count(ctx)
if err != nil {
panic(err)
}
return count
}
// Exist returns true if the query has elements in the graph.
func (atq *AITaskQuery) Exist(ctx context.Context) (bool, error) {
ctx = setContextOp(ctx, atq.ctx, ent.OpQueryExist)
switch _, err := atq.FirstID(ctx); {
case IsNotFound(err):
return false, nil
case err != nil:
return false, fmt.Errorf("db: check existence: %w", err)
default:
return true, nil
}
}
// ExistX is like Exist, but panics if an error occurs.
func (atq *AITaskQuery) ExistX(ctx context.Context) bool {
exist, err := atq.Exist(ctx)
if err != nil {
panic(err)
}
return exist
}
// Clone returns a duplicate of the AITaskQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (atq *AITaskQuery) Clone() *AITaskQuery {
if atq == nil {
return nil
}
return &AITaskQuery{
config: atq.config,
ctx: atq.ctx.Clone(),
order: append([]aitask.OrderOption{}, atq.order...),
inters: append([]Interceptor{}, atq.inters...),
predicates: append([]predicate.AITask{}, atq.predicates...),
// clone intermediate query.
sql: atq.sql.Clone(),
path: atq.path,
modifiers: append([]func(*sql.Selector){}, atq.modifiers...),
}
}
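// countAndPage is an illustrative sketch, not part of the generated API: because Clone
// copies the predicates and ordering of the base builder, one prepared query can be
// reused for both a total count and a limited page without the two calls interfering.
func countAndPage(ctx context.Context, base *AITaskQuery, size int) (int, []*AITask, error) {
	total, err := base.Clone().Count(ctx)
	if err != nil {
		return 0, nil, err
	}
	page, err := base.Clone().Limit(size).All(ctx)
	return total, page, err
}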
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
// var v []struct {
// EmployeeID uuid.UUID `json:"employee_id,omitempty"`
// Count int `json:"count,omitempty"`
// }
//
// client.AITask.Query().
// GroupBy(aitask.FieldEmployeeID).
// Aggregate(db.Count()).
// Scan(ctx, &v)
func (atq *AITaskQuery) GroupBy(field string, fields ...string) *AITaskGroupBy {
atq.ctx.Fields = append([]string{field}, fields...)
grbuild := &AITaskGroupBy{build: atq}
grbuild.flds = &atq.ctx.Fields
grbuild.label = aitask.Label
grbuild.scan = grbuild.Scan
return grbuild
}
// Select allows the selection of one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
// var v []struct {
// EmployeeID uuid.UUID `json:"employee_id,omitempty"`
// }
//
// client.AITask.Query().
// Select(aitask.FieldEmployeeID).
// Scan(ctx, &v)
func (atq *AITaskQuery) Select(fields ...string) *AITaskSelect {
atq.ctx.Fields = append(atq.ctx.Fields, fields...)
sbuild := &AITaskSelect{AITaskQuery: atq}
sbuild.label = aitask.Label
sbuild.flds, sbuild.scan = &atq.ctx.Fields, sbuild.Scan
return sbuild
}
// Aggregate returns an AITaskSelect configured with the given aggregations.
func (atq *AITaskQuery) Aggregate(fns ...AggregateFunc) *AITaskSelect {
return atq.Select().Aggregate(fns...)
}
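// totalTasks is an illustrative sketch, not part of the generated API: Aggregate is
// shorthand for Select().Aggregate(...), and the aggregated value is read back with
// Scan. The "count" key matches the alias used by the generated Count aggregation, as
// in the GroupBy example above.
func totalTasks(ctx context.Context, client *Client) (int, error) {
	var v []struct {
		Count int `json:"count"`
	}
	if err := client.AITask.Query().Aggregate(Count()).Scan(ctx, &v); err != nil {
		return 0, err
	}
	if len(v) == 0 {
		return 0, nil
	}
	return v[0].Count, nil
}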
func (atq *AITaskQuery) prepareQuery(ctx context.Context) error {
for _, inter := range atq.inters {
if inter == nil {
return fmt.Errorf("db: uninitialized interceptor (forgotten import db/runtime?)")
}
if trv, ok := inter.(Traverser); ok {
if err := trv.Traverse(ctx, atq); err != nil {
return err
}
}
}
for _, f := range atq.ctx.Fields {
if !aitask.ValidColumn(f) {
return &ValidationError{Name: f, err: fmt.Errorf("db: invalid field %q for query", f)}
}
}
if atq.path != nil {
prev, err := atq.path(ctx)
if err != nil {
return err
}
atq.sql = prev
}
return nil
}
func (atq *AITaskQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AITask, error) {
var (
nodes = []*AITask{}
_spec = atq.querySpec()
)
_spec.ScanValues = func(columns []string) ([]any, error) {
return (*AITask).scanValues(nil, columns)
}
_spec.Assign = func(columns []string, values []any) error {
node := &AITask{config: atq.config}
nodes = append(nodes, node)
return node.assignValues(columns, values)
}
if len(atq.modifiers) > 0 {
_spec.Modifiers = atq.modifiers
}
for i := range hooks {
hooks[i](ctx, _spec)
}
if err := sqlgraph.QueryNodes(ctx, atq.driver, _spec); err != nil {
return nil, err
}
if len(nodes) == 0 {
return nodes, nil
}
return nodes, nil
}
func (atq *AITaskQuery) sqlCount(ctx context.Context) (int, error) {
_spec := atq.querySpec()
if len(atq.modifiers) > 0 {
_spec.Modifiers = atq.modifiers
}
_spec.Node.Columns = atq.ctx.Fields
if len(atq.ctx.Fields) > 0 {
_spec.Unique = atq.ctx.Unique != nil && *atq.ctx.Unique
}
return sqlgraph.CountNodes(ctx, atq.driver, _spec)
}
func (atq *AITaskQuery) querySpec() *sqlgraph.QuerySpec {
_spec := sqlgraph.NewQuerySpec(aitask.Table, aitask.Columns, sqlgraph.NewFieldSpec(aitask.FieldID, field.TypeUUID))
_spec.From = atq.sql
if unique := atq.ctx.Unique; unique != nil {
_spec.Unique = *unique
} else if atq.path != nil {
_spec.Unique = true
}
if fields := atq.ctx.Fields; len(fields) > 0 {
_spec.Node.Columns = make([]string, 0, len(fields))
_spec.Node.Columns = append(_spec.Node.Columns, aitask.FieldID)
for i := range fields {
if fields[i] != aitask.FieldID {
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
}
}
}
if ps := atq.predicates; len(ps) > 0 {
_spec.Predicate = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
if limit := atq.ctx.Limit; limit != nil {
_spec.Limit = *limit
}
if offset := atq.ctx.Offset; offset != nil {
_spec.Offset = *offset
}
if ps := atq.order; len(ps) > 0 {
_spec.Order = func(selector *sql.Selector) {
for i := range ps {
ps[i](selector)
}
}
}
return _spec
}
func (atq *AITaskQuery) sqlQuery(ctx context.Context) *sql.Selector {
builder := sql.Dialect(atq.driver.Dialect())
t1 := builder.Table(aitask.Table)
columns := atq.ctx.Fields
if len(columns) == 0 {
columns = aitask.Columns
}
selector := builder.Select(t1.Columns(columns...)...).From(t1)
if atq.sql != nil {
selector = atq.sql
selector.Select(selector.Columns(columns...)...)
}
if atq.ctx.Unique != nil && *atq.ctx.Unique {
selector.Distinct()
}
for _, m := range atq.modifiers {
m(selector)
}
for _, p := range atq.predicates {
p(selector)
}
for _, p := range atq.order {
p(selector)
}
if offset := atq.ctx.Offset; offset != nil {
// limit is mandatory for offset clause. We start
// with default value, and override it below if needed.
selector.Offset(*offset).Limit(math.MaxInt32)
}
if limit := atq.ctx.Limit; limit != nil {
selector.Limit(*limit)
}
return selector
}
// ForUpdate locks the selected rows against concurrent updates, and prevents them from being
// updated, deleted, or "selected ... for update" by other sessions until the transaction is
// either committed or rolled back.
func (atq *AITaskQuery) ForUpdate(opts ...sql.LockOption) *AITaskQuery {
if atq.driver.Dialect() == dialect.Postgres {
atq.Unique(false)
}
atq.modifiers = append(atq.modifiers, func(s *sql.Selector) {
s.ForUpdate(opts...)
})
return atq
}
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
// on any rows that are read. Other sessions can read the rows, but cannot modify them
// until your transaction commits.
func (atq *AITaskQuery) ForShare(opts ...sql.LockOption) *AITaskQuery {
if atq.driver.Dialect() == dialect.Postgres {
atq.Unique(false)
}
atq.modifiers = append(atq.modifiers, func(s *sql.Selector) {
s.ForShare(opts...)
})
return atq
}
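// lockTask is an illustrative sketch, not part of the generated API: row locks only make
// sense inside a transaction, so the query runs on a *Tx and the FOR UPDATE lock is held
// until the transaction is committed or rolled back. Tx.AITask is the transactional
// client generated alongside this builder.
func lockTask(ctx context.Context, tx *Tx, id uuid.UUID) (*AITask, error) {
	return tx.AITask.Query().
		Where(aitask.ID(id)).
		ForUpdate(). // SELECT ... FOR UPDATE; concurrent writers block until this tx ends
		Only(ctx)
}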
// Modify adds a query modifier for attaching custom logic to queries.
func (atq *AITaskQuery) Modify(modifiers ...func(s *sql.Selector)) *AITaskSelect {
atq.modifiers = append(atq.modifiers, modifiers...)
return atq.Select()
}
// AITaskGroupBy is the group-by builder for AITask entities.
type AITaskGroupBy struct {
selector
build *AITaskQuery
}
// Aggregate adds the given aggregation functions to the group-by query.
func (atgb *AITaskGroupBy) Aggregate(fns ...AggregateFunc) *AITaskGroupBy {
atgb.fns = append(atgb.fns, fns...)
return atgb
}
// Scan applies the selector query and scans the result into the given value.
func (atgb *AITaskGroupBy) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, atgb.build.ctx, ent.OpQueryGroupBy)
if err := atgb.build.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*AITaskQuery, *AITaskGroupBy](ctx, atgb.build, atgb, atgb.build.inters, v)
}
func (atgb *AITaskGroupBy) sqlScan(ctx context.Context, root *AITaskQuery, v any) error {
selector := root.sqlQuery(ctx).Select()
aggregation := make([]string, 0, len(atgb.fns))
for _, fn := range atgb.fns {
aggregation = append(aggregation, fn(selector))
}
if len(selector.SelectedColumns()) == 0 {
columns := make([]string, 0, len(*atgb.flds)+len(atgb.fns))
for _, f := range *atgb.flds {
columns = append(columns, selector.C(f))
}
columns = append(columns, aggregation...)
selector.Select(columns...)
}
selector.GroupBy(selector.Columns(*atgb.flds...)...)
if err := selector.Err(); err != nil {
return err
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := atgb.build.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// AITaskSelect is the builder for selecting fields of AITask entities.
type AITaskSelect struct {
*AITaskQuery
selector
}
// Aggregate adds the given aggregation functions to the selector query.
func (ats *AITaskSelect) Aggregate(fns ...AggregateFunc) *AITaskSelect {
ats.fns = append(ats.fns, fns...)
return ats
}
// Scan applies the selector query and scans the result into the given value.
func (ats *AITaskSelect) Scan(ctx context.Context, v any) error {
ctx = setContextOp(ctx, ats.ctx, ent.OpQuerySelect)
if err := ats.prepareQuery(ctx); err != nil {
return err
}
return scanWithInterceptors[*AITaskQuery, *AITaskSelect](ctx, ats.AITaskQuery, ats, ats.inters, v)
}
func (ats *AITaskSelect) sqlScan(ctx context.Context, root *AITaskQuery, v any) error {
selector := root.sqlQuery(ctx)
aggregation := make([]string, 0, len(ats.fns))
for _, fn := range ats.fns {
aggregation = append(aggregation, fn(selector))
}
switch n := len(*ats.selector.flds); {
case n == 0 && len(aggregation) > 0:
selector.Select(aggregation...)
case n != 0 && len(aggregation) > 0:
selector.AppendSelect(aggregation...)
}
rows := &sql.Rows{}
query, args := selector.Query()
if err := ats.driver.Query(ctx, query, args, rows); err != nil {
return err
}
defer rows.Close()
return sql.ScanSlice(rows, v)
}
// Modify adds a query modifier for attaching custom logic to queries.
func (ats *AITaskSelect) Modify(modifiers ...func(s *sql.Selector)) *AITaskSelect {
ats.modifiers = append(ats.modifiers, modifiers...)
return ats
}
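// pendingTaskIDs is an illustrative sketch, not part of the generated API: Modify (on
// either the query or the select builder) hands the underlying *sql.Selector to the
// callback so custom SQL can be attached before execution. The "status" column and the
// "pending" value are assumptions about this schema, used only to illustrate a raw predicate.
func pendingTaskIDs(ctx context.Context, client *Client) ([]uuid.UUID, error) {
	var ids []uuid.UUID
	err := client.AITask.Query().
		Select(aitask.FieldID).
		Modify(func(s *sql.Selector) {
			s.Where(sql.EQ(s.C("status"), "pending")) // raw predicate on an assumed column
		}).
		Scan(ctx, &ids)
	return ids, err
}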