Commit: saving changes

Files changed:
- alembic.ini +147 -0
- app/auth.py +128 -4
- app/feeds.py +258 -0
- app/main.py +2 -0
- app/models.py +24 -0
- app/upload.py +65 -1
- app/utils/s3.py +22 -0
- migrations/README +1 -0
- migrations/env.py +78 -0
- migrations/script.py.mako +28 -0
- migrations/versions/f8e8dc3fc213_add_exam_subjects_reset_fields_to_users.py +28 -0
alembic.ini
ADDED
@@ -0,0 +1,147 @@
+# A generic, single database configuration.
+
+[alembic]
+# path to migration scripts.
+# this is typically a path given in POSIX (e.g. forward slashes)
+# format, relative to the token %(here)s which refers to the location of this
+# ini file
+script_location = %(here)s/migrations
+
+# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
+# Uncomment the line below if you want the files to be prepended with date and time
+# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
+# for all available tokens
+# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
+
+# sys.path path, will be prepended to sys.path if present.
+# defaults to the current working directory. for multiple paths, the path separator
+# is defined by "path_separator" below.
+prepend_sys_path = .
+
+
+# timezone to use when rendering the date within the migration file
+# as well as the filename.
+# If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library.
+# Any required deps can be installed by adding `alembic[tz]` to the pip requirements
+# string value is passed to ZoneInfo()
+# leave blank for localtime
+# timezone =
+
+# max length of characters to apply to the "slug" field
+# truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+# sourceless = false
+
+# version location specification; This defaults
+# to <script_location>/versions. When using multiple version
+# directories, initial revisions must be specified with --version-path.
+# The path separator used here should be the separator specified by "path_separator"
+# below.
+# version_locations = %(here)s/bar:%(here)s/bat:%(here)s/alembic/versions
+
+# path_separator; This indicates what character is used to split lists of file
+# paths, including version_locations and prepend_sys_path within configparser
+# files such as alembic.ini.
+# The default rendered in new alembic.ini files is "os", which uses os.pathsep
+# to provide os-dependent path splitting.
+#
+# Note that in order to support legacy alembic.ini files, this default does NOT
+# take place if path_separator is not present in alembic.ini. If this
+# option is omitted entirely, fallback logic is as follows:
+#
+# 1. Parsing of the version_locations option falls back to using the legacy
+#    "version_path_separator" key, which if absent then falls back to the legacy
+#    behavior of splitting on spaces and/or commas.
+# 2. Parsing of the prepend_sys_path option falls back to the legacy
+#    behavior of splitting on spaces, commas, or colons.
+#
+# Valid values for path_separator are:
+#
+# path_separator = :
+# path_separator = ;
+# path_separator = space
+# path_separator = newline
+#
+# Use os.pathsep. Default configuration used for new projects.
+path_separator = os
+
+# set to 'true' to search source files recursively
+# in each "version_locations" directory
+# new in Alembic version 1.10
+# recursive_version_locations = false
+
+# the output encoding used when revision files
+# are written from script.py.mako
+# output_encoding = utf-8
+
+# database URL. This is consumed by the user-maintained env.py script only.
+# other means of configuring database URLs may be customized within the env.py
+# file.
+sqlalchemy.url = postgresql+asyncpg://dubsway-media-analyser-db_owner:npg_9ChyJBYKafN6@ep-morning-sky-a1gyd5tp-pooler.ap-southeast-1.aws.neon.tech/dubsway-media-analyser-db?ssl=require
+
+
+[post_write_hooks]
+# post_write_hooks defines scripts or Python functions that are run
+# on newly generated revision scripts. See the documentation for further
+# detail and examples
+
+# format using "black" - use the console_scripts runner, against the "black" entrypoint
+# hooks = black
+# black.type = console_scripts
+# black.entrypoint = black
+# black.options = -l 79 REVISION_SCRIPT_FILENAME
+
+# lint with attempts to fix using "ruff" - use the module runner, against the "ruff" module
+# hooks = ruff
+# ruff.type = module
+# ruff.module = ruff
+# ruff.options = check --fix REVISION_SCRIPT_FILENAME
+
+# Alternatively, use the exec runner to execute a binary found on your PATH
+# hooks = ruff
+# ruff.type = exec
+# ruff.executable = ruff
+# ruff.options = check --fix REVISION_SCRIPT_FILENAME
+
+# Logging configuration. This is also consumed by the user-maintained
+# env.py script only.
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARNING
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARNING
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
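
A minimal sketch (not part of the commit) of applying this configuration through Alembic's Python API. It assumes alembic.ini sits in the project root; note that migrations/env.py builds a synchronous Engine via engine_from_config, so the asyncpg URL above may need a sync driver (e.g. postgresql+psycopg2) when migrations actually run.

    # Equivalent of `alembic upgrade head`, via Alembic's Python API.
    from alembic import command
    from alembic.config import Config

    cfg = Config("alembic.ini")   # reads script_location and sqlalchemy.url
    command.upgrade(cfg, "head")  # apply all pending revisions
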
app/auth.py
CHANGED
@@ -11,6 +11,7 @@ import logging
 from dotenv import load_dotenv
 from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
 from jose import JWTError
+from datetime import datetime, timedelta, timezone
 
 router = APIRouter()
 logger = logging.getLogger(__name__)
@@ -56,11 +57,16 @@ class SignUp(BaseModel):
     name: str | None = None
     dob: str | None = None
     preparing_for: str | None = None
+    exam: str | None = None
+    subjects: list[str] | None = None
 
 
 class Login(BaseModel):
     email: EmailStr
     password: str
+    # Allow capturing preferences at sign-in if provided by UI
+    exam: str | None = None
+    subjects: list[str] | None = None
 
 
 class UpdateProfile(BaseModel):
@@ -68,6 +74,22 @@ class UpdateProfile(BaseModel):
     name: str | None = None
     dob: str | None = None
     preparing_for: str | None = None
+    exam: str | None = None
+    subjects: list[str] | None = None
+
+
+class RequestPasswordReset(BaseModel):
+    email: EmailStr
+
+
+class ConfirmPasswordReset(BaseModel):
+    token: str
+    new_password: str
+
+
+class ChangePassword(BaseModel):
+    current_password: str
+    new_password: str
 
 
 @router.put("/auth/profile")
@@ -83,6 +105,10 @@ async def update_profile(data: UpdateProfile,
         current_user.dob = data.dob
     if data.preparing_for is not None:
         current_user.preparing_for = data.preparing_for
+    if data.exam is not None:
+        current_user.exam = data.exam
+    if data.subjects is not None:
+        current_user.subjects = ",".join([s.strip() for s in data.subjects if s and s.strip()]) or None
 
     try:
         await db.commit()
@@ -110,9 +136,16 @@ async def signup(data: SignUp, db: AsyncSession = Depends(get_db)):
         raise HTTPException(status_code=400, detail="Email already exists")
 
     hashed_password = pwd_context.hash(data.password)
-    new_user = User(
-
-
+    new_user = User(
+        email=data.email,
+        hashed_password=hashed_password,
+        mobile=data.mobile,
+        name=data.name,
+        dob=data.dob,
+        preparing_for=data.preparing_for,
+        exam=data.exam,
+        subjects=(",".join(data.subjects) if data.subjects else None),
+    )
 
     try:
         db.add(new_user)
@@ -133,9 +166,100 @@ async def login(data: Login, db: AsyncSession = Depends(get_db)):
     if not user or not pwd_context.verify(data.password, user.hashed_password):
         raise HTTPException(status_code=401, detail="Invalid credentials")
 
+    # Optionally update preferences at login if provided
+    try:
+        updated = False
+        if data.exam is not None and data.exam != user.exam:
+            user.exam = data.exam
+            updated = True
+        if data.subjects is not None:
+            subjects_joined = ",".join([s.strip() for s in data.subjects if s and s.strip()]) or None
+            if subjects_joined != (user.subjects or None):
+                user.subjects = subjects_joined
+                updated = True
+        if updated:
+            await db.commit()
+            await db.refresh(user)
+    except Exception:
+        await db.rollback()
+
     token = jwt.encode({"user_id": user.id}, SECRET_KEY, algorithm=ALGORITHM)
     return {
         "access_token": token,
         "token_type": "bearer",
-        "user": {
+        "user": {
+            "id": user.id,
+            "email": user.email,
+            "exam": user.exam,
+            "subjects": (user.subjects.split(",") if user.subjects else []),
+        },
     }
+
+
+@router.post("/auth/password/request-reset")
+async def request_password_reset(data: RequestPasswordReset, db: AsyncSession = Depends(get_db)):
+    result = await db.execute(select(User).where(User.email == data.email))
+    user = result.scalar_one_or_none()
+    if not user:
+        # Do not reveal whether email exists
+        return {"message": "If the email exists, a reset link has been sent."}
+
+    # Create a short-lived token
+    expires_at = datetime.now(timezone.utc) + timedelta(minutes=30)
+    reset_token = jwt.encode({"user_id": user.id, "pr": True, "exp": expires_at}, SECRET_KEY, algorithm=ALGORITHM)
+    user.password_reset_token = reset_token
+    user.password_reset_expires = expires_at
+    try:
+        await db.commit()
+    except Exception as e:
+        await db.rollback()
+        logger.error(f"Failed to store reset token: {e}")
+        raise HTTPException(status_code=500, detail="Internal Server Error")
+
+    # NOTE: Integrate email/SMS sending here. For now, return token for development.
+    return {"message": "Reset token generated", "reset_token": reset_token}
+
+
+@router.post("/auth/password/confirm-reset")
+async def confirm_password_reset(data: ConfirmPasswordReset, db: AsyncSession = Depends(get_db)):
+    try:
+        payload = jwt.decode(data.token, SECRET_KEY, algorithms=[ALGORITHM])
+        user_id = payload.get("user_id")
+        if not user_id or not payload.get("pr"):
+            raise HTTPException(status_code=400, detail="Invalid token")
+    except JWTError:
+        raise HTTPException(status_code=400, detail="Invalid or expired token")
+
+    result = await db.execute(select(User).where(User.id == user_id))
+    user = result.scalar_one_or_none()
+    if not user or user.password_reset_token != data.token:
+        raise HTTPException(status_code=400, detail="Invalid token")
+    if user.password_reset_expires and datetime.now(timezone.utc) > user.password_reset_expires:
+        raise HTTPException(status_code=400, detail="Token expired")
+
+    user.hashed_password = pwd_context.hash(data.new_password)
+    user.password_reset_token = None
+    user.password_reset_expires = None
+    try:
+        await db.commit()
+        return {"message": "Password reset successful"}
+    except Exception as e:
+        await db.rollback()
+        logger.error(f"Password reset error: {e}")
+        raise HTTPException(status_code=500, detail="Internal Server Error")
+
+
+@router.post("/auth/password/change")
+async def change_password(data: ChangePassword,
+                          current_user: User = Depends(get_current_user),
+                          db: AsyncSession = Depends(get_db)):
+    if not pwd_context.verify(data.current_password, current_user.hashed_password):
+        raise HTTPException(status_code=400, detail="Current password incorrect")
+    current_user.hashed_password = pwd_context.hash(data.new_password)
+    try:
+        await db.commit()
+        return {"message": "Password changed"}
+    except Exception as e:
+        await db.rollback()
+        logger.error(f"Change password error: {e}")
+        raise HTTPException(status_code=500, detail="Internal Server Error")
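
A sketch of exercising the new password-reset flow with httpx; the base URL and email are hypothetical, and it relies on the development behavior above where request-reset returns the token in the response body.

    import httpx

    BASE = "http://localhost:8000/api"  # hypothetical host

    with httpx.Client() as client:
        # 1) Request a reset token (the dev build returns it directly)
        r = client.post(f"{BASE}/auth/password/request-reset",
                        json={"email": "[email protected]"})
        token = r.json().get("reset_token")

        # 2) Redeem the short-lived token with a new password
        r = client.post(f"{BASE}/auth/password/confirm-reset",
                        json={"token": token, "new_password": "new-secret"})
        print(r.json())  # expected: {"message": "Password reset successful"}
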
app/feeds.py
ADDED
@@ -0,0 +1,258 @@
+import random
+import logging
+import json
+import os
+from typing import List
+
+from fastapi import APIRouter, Depends, HTTPException
+from pydantic import BaseModel
+from sqlalchemy import select, delete
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from app.auth import get_current_user
+from app.database import get_db
+from app.models import Feed, User
+from langchain_groq import ChatGroq
+from langchain_openai import OpenAIEmbeddings
+from langchain_community.vectorstores import SupabaseVectorStore
+from supabase.client import create_client
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain_core.prompts import ChatPromptTemplate
+
+try:
+    import wikipediaapi
+except Exception:
+    wikipediaapi = None
+
+# Logger early init (used during module init below)
+logger = logging.getLogger("app.feeds")
+if not logger.handlers:
+    handler = logging.StreamHandler()
+    formatter = logging.Formatter("[%(asctime)s] %(levelname)s - %(message)s")
+    handler.setFormatter(formatter)
+    logger.addHandler(handler)
+    logger.setLevel(logging.INFO)
+
+# Supabase Initialization (independent of other modules)
+SUPABASE_URL = os.getenv("SUPABASE_URL")
+SUPABASE_KEY = os.getenv("SUPABASE_KEY")
+supabase_client = None
+if SUPABASE_URL and SUPABASE_KEY:
+    try:
+        supabase_client = create_client(SUPABASE_URL, SUPABASE_KEY)
+        logger.info("✅ Supabase client (feeds) initialized.")
+    except Exception as e:
+        logger.warning(f"Supabase init failed in feeds: {e}")
+
+router = APIRouter()
+
+
+class FeedOut(BaseModel):
+    id: int
+    title: str
+    content: str
+    media_url: str | None = None
+    tags: list[str] | None = None
+    category: str | None = None
+    source: str | None = None
+    relevance_score: int
+    likes: int
+    comments_count: int
+    shares: int
+
+    class Config:
+        from_attributes = True
+
+
+def _fallback_generate(user: User) -> List[dict]:
+    topics = [
+        "career-growth", "interview-prep", "project-ideas", "resume-tips",
+        "networking", "internships", "leetcode", "system-design", "ai-ml",
+        "cloud", "devops", "frontend", "backend"
+    ]
+    base_tags = ["tips", "guide", "beginner", "advanced", "2025", "best-practices"]
+
+    user_topic = (user.preparing_for or "career-growth").lower().replace(" ", "-")
+    chosen_topics = list({user_topic} | set(random.sample(topics, k=4)))
+
+    items: List[dict] = []
+    for _ in range(25):
+        topic = random.choice(chosen_topics)
+        title = f"{topic.title()} insights for {user.name or 'you'}"
+        content = (
+            f"Actionable {topic.replace('-', ' ')} advice tailored for {user.name or 'your profile'}. "
+            f"Focus on consistent practice, portfolio building, and networking."
+        )
+        tags = list({topic, *(random.sample(base_tags, k=3))})
+        items.append({
+            "title": title[:200],
+            "content": content,
+            "media_url": None,
+            "tags": ",".join(tags),
+            "category": topic,
+            "source": "dubsway-ai",
+            "relevance_score": random.randint(70, 100),
+            "likes": random.randint(0, 50),
+            "comments_count": random.randint(0, 20),
+            "shares": random.randint(0, 10),
+        })
+    return items
+
+
+def _agentic_generate(user: User) -> List[dict]:
+    """Generate context-aware feeds using vector DB, public info, and LLM.
+    Falls back to heuristic generation if LLM not available.
+    """
+    # 1) Gather user context
+    user_focus = (user.preparing_for or "career growth").strip()
+
+    # 2) Retrieve user-related docs from Supabase vector store via LangChain
+    doc_snippets = ""
+    try:
+        if supabase_client is not None:
+            embeddings = OpenAIEmbeddings()
+            vector_store = SupabaseVectorStore(
+                client=supabase_client,
+                embedding=embeddings,
+                table_name="documents",
+                query_name="match_documents",
+            )
+            retriever = vector_store.as_retriever(search_kwargs={"k": 8, "filter": {"user_id": user.id}})
+            # Use a simple seed question from the user's focus
+            seed_q = f"Key takeaways for {user_focus}"
+            retrieved = retriever.invoke(seed_q)
+            # retrieved may be list of docs depending on LC version
+            docs = retrieved if isinstance(retrieved, list) else retrieved.get("context", [])
+            parts: List[str] = []
+            for d in docs[:10]:
+                try:
+                    parts.append(getattr(d, "page_content", "")[:400])
+                except Exception:
+                    pass
+            doc_snippets = "\n".join(parts)[:4000]
+    except Exception as e:
+        logger.warning(f"Supabase retrieval failed: {e}")
+
+    # 3) Pull public info (Wikipedia summary on user focus)
+    public_summary = ""
+    try:
+        if wikipediaapi:
+            wiki = wikipediaapi.Wikipedia(language='en', user_agent='DubswayVideoAI/1.0 (contact: [email protected])')
+            page = wiki.page(user_focus)
+            if page and page.exists():
+                public_summary = page.summary[:1200]
+    except Exception as e:
+        logger.warning(f"Wikipedia fetch failed: {e}")
+
+    # 4) Use Groq LLM to synthesize 25 feeds
+    try:
+        llm = ChatGroq(groq_api_key=os.getenv("GROQ_API_KEY"), model_name=os.getenv("GROQ_MODEL", "llama-3.3-70b-versatile"))
+        system = (
+            "You are a career coach assistant. Create 25 short personalized feed items that help the user grow "
+            "in their career. Each item should be practical and contextual to the user focus, using the given "
+            "context notes and public info. Output strictly JSON array with objects having keys: title, content, "
+            "tags (array of strings), category, source."
+        )
+        prompt = (
+            f"User name: {user.name or 'User'}\n"
+            f"User focus: {user_focus}\n\n"
+            f"Context from user's vector docs (may be empty):\n{doc_snippets}\n\n"
+            f"Public info (may be empty):\n{public_summary}\n\n"
+            f"Generate 25 items. Keep title <= 120 chars and content 1-2 sentences."
+        )
+        resp = llm.invoke([{"role": "system", "content": system + " Respond ONLY with a JSON array."}, {"role": "user", "content": prompt}])
+        text = resp.content if hasattr(resp, "content") else str(resp)
+        # Normalize potential markdown code fences and extract JSON array
+        text_stripped = text.strip()
+        if text_stripped.startswith("```"):
+            text_stripped = text_stripped.strip('`')
+        if text_stripped.startswith("```json"):
+            text_stripped = text_stripped[7:]
+        if text_stripped.startswith("```") and text_stripped.endswith("```"):
+            text_stripped = text_stripped[3:-3]
+        # Attempt to find a JSON array inside
+        try:
+            data = json.loads(text_stripped)
+        except Exception:
+            start = text_stripped.find('[')
+            end = text_stripped.rfind(']')
+            if start != -1 and end != -1 and end > start:
+                data = json.loads(text_stripped[start:end+1])
+            else:
+                raise
+        items: List[dict] = []
+        for it in data[:25]:
+            tags_joined = ",".join(it.get("tags", [])[:6]) if isinstance(it.get("tags"), list) else None
+            items.append({
+                "title": (it.get("title") or "").strip()[:200] or "Career insight",
+                "content": (it.get("content") or "").strip() or "Practical career tip.",
+                "media_url": None,
+                "tags": tags_joined,
+                "category": (it.get("category") or user_focus)[:64],
+                "source": (it.get("source") or "agentic-ai")[:64],
+                "relevance_score": random.randint(80, 100),
+                "likes": 0,
+                "comments_count": 0,
+                "shares": 0,
+            })
+        # Ensure we always return 25
+        if len(items) < 25:
+            items.extend(_fallback_generate(user)[: 25 - len(items)])
+        return items[:25]
+    except Exception as e:
+        logger.error(f"Agentic generation failed: {e}")
+        return _fallback_generate(user)
+
+
+@router.get("/feeds", response_model=List[FeedOut])
+async def get_feeds(current_user: User = Depends(get_current_user),
+                    db: AsyncSession = Depends(get_db)):
+    # Auto-refresh: clear previous feeds for this user
+    try:
+        await db.execute(delete(Feed).where(Feed.user_id == current_user.id))
+        await db.commit()
+    except Exception as e:
+        await db.rollback()
+        logger.error(f"Failed deleting old feeds: {e}")
+        raise HTTPException(status_code=500, detail="Failed refreshing feeds")
+
+    # Generate new 25 items (agentic with fallback)
+    items = _agentic_generate(current_user)
+
+    # Insert into DB
+    try:
+        feed_rows = [
+            Feed(user_id=current_user.id, **item) for item in items
+        ]
+        db.add_all(feed_rows)
+        await db.commit()
+    except Exception as e:
+        await db.rollback()
+        logger.error(f"Failed inserting feeds: {e}")
+        raise HTTPException(status_code=500, detail="Failed storing feeds")
+
+    # Return top 25 ordered by relevance desc, then recent
+    result = await db.execute(
+        select(Feed).where(Feed.user_id == current_user.id).order_by(Feed.relevance_score.desc(), Feed.id.desc()).limit(25)
+    )
+    rows = result.scalars().all()
+
+    # Convert comma tags to list for response
+    out: List[FeedOut] = []
+    for r in rows:
+        out.append(FeedOut(
+            id=r.id,
+            title=r.title,
+            content=r.content,
+            media_url=r.media_url,
+            tags=(r.tags.split(",") if r.tags else None),
+            category=r.category,
+            source=r.source,
+            relevance_score=r.relevance_score or 0,
+            likes=r.likes or 0,
+            comments_count=r.comments_count or 0,
+            shares=r.shares or 0,
+        ))
+    return out
+
+
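
The fence-stripping and JSON-recovery logic above is easy to verify in isolation; a standalone sketch of the same approach:

    import json

    def extract_json_array(text: str):
        # Strip a leading ```json fence, then a bare ``` fence pair.
        s = text.strip()
        if s.startswith("```json"):
            s = s[7:]
        if s.startswith("```") and s.endswith("```"):
            s = s[3:-3]
        try:
            return json.loads(s)
        except Exception:
            # Fall back to the outermost [...] span.
            start, end = s.find('['), s.rfind(']')
            if start != -1 and end > start:
                return json.loads(s[start:end + 1])
            raise

    print(extract_json_array('```json\n[{"title": "t", "content": "c"}]\n```'))
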
app/main.py
CHANGED
@@ -12,6 +12,7 @@ from app.auth import router as auth_router
 from app.upload import router as upload_router
 from app.dashboard import router as dashboard_router
 from app.agent.custom_chatbot import router as custom_chatbot_router
+from app.feeds import router as feeds_router
 # from app.routes import pdf_ingestions
 
 # Initialize logger
@@ -40,6 +41,7 @@ app.include_router(auth_router, prefix="/api", tags=["Auth"])
 app.include_router(upload_router, prefix="/api", tags=["Upload"])
 app.include_router(dashboard_router, prefix="/api", tags=["Dashboard"])
 app.include_router(custom_chatbot_router, prefix="/api", tags=["Custom Chatbot"])
+app.include_router(feeds_router, prefix="/api", tags=["Feeds"])
 # app.include_router(pdf_ingestion.router, prefix="/api", tags=["PDF Ingestion"])
 
 @app.get("/")
app/models.py
CHANGED
@@ -12,6 +12,12 @@ class User(Base):
     name = Column(String, nullable=True)
     dob = Column(String, nullable=True)
     preparing_for = Column(String, nullable=True)
+    # Additional preferences/fields
+    exam = Column(String, nullable=True)
+    subjects = Column(Text, nullable=True)  # comma-separated subjects
+    # Password reset support
+    password_reset_token = Column(String, nullable=True, index=True)
+    password_reset_expires = Column(DateTime(timezone=True), nullable=True)
 
 
 class VideoUpload(Base):
@@ -23,3 +29,21 @@ class VideoUpload(Base):
     status = Column(String, default="pending")  # pending, processing, completed
     created_at = Column(DateTime(timezone=True), server_default=func.now())
     updated_at = Column(DateTime(timezone=True), onupdate=func.now())
+
+
+class Feed(Base):
+    __tablename__ = "feeds"
+    id = Column(Integer, primary_key=True, index=True)
+    user_id = Column(Integer, ForeignKey("users.id"), index=True, nullable=False)
+    title = Column(String, nullable=False)
+    content = Column(Text, nullable=False)
+    media_url = Column(Text, nullable=True)
+    tags = Column(Text, nullable=True)  # comma-separated list
+    category = Column(String, nullable=True)
+    source = Column(String, nullable=True)
+    relevance_score = Column(Integer, default=0)  # 0-100
+    likes = Column(Integer, default=0)
+    comments_count = Column(Integer, default=0)
+    shares = Column(Integer, default=0)
+    created_at = Column(DateTime(timezone=True), server_default=func.now())
+    updated_at = Column(DateTime(timezone=True), onupdate=func.now())
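
User.subjects and Feed.tags store lists as comma-separated text and are split on read; a sketch of the round trip (the scheme silently breaks for values that themselves contain commas, so callers should sanitize input):

    def pack(values: list[str] | None) -> str | None:
        # Mirror of the joining done in app/auth.py: strip blanks, None when empty.
        cleaned = [v.strip() for v in (values or []) if v and v.strip()]
        return ",".join(cleaned) or None

    def unpack(raw: str | None) -> list[str]:
        # Mirror of the splitting done in app/feeds.py responses.
        return raw.split(",") if raw else []

    assert unpack(pack(["math", " physics "])) == ["math", "physics"]
    assert pack([]) is None
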
app/upload.py
CHANGED
@@ -3,7 +3,10 @@ from sqlalchemy.ext.asyncio import AsyncSession
 from sqlalchemy.future import select
 from app.database import get_db
 from app.models import VideoUpload
-from .utils.s3 import upload_to_s3
+from .utils.s3 import upload_to_s3, delete_s3_key, key_from_url
+from app.auth import get_current_user
+from app.models import User
+from sqlalchemy import delete as sqldelete
 import uuid
 import os
 
@@ -51,3 +54,64 @@ async def upload_video(
     except Exception as e:
         await db.rollback()
         raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.delete("/uploads/{video_id}")
+async def delete_uploaded_video(video_id: int,
+                                current_user: User = Depends(get_current_user),
+                                db: AsyncSession = Depends(get_db)):
+    result = await db.execute(select(VideoUpload).where(VideoUpload.id == video_id))
+    video: VideoUpload | None = result.scalar_one_or_none()
+    if not video:
+        raise HTTPException(status_code=404, detail="Video not found")
+    if video.user_id != current_user.id:
+        raise HTTPException(status_code=403, detail="Not allowed")
+
+    # Delete from S3 if present
+    v_key = key_from_url(video.video_url)
+    p_key = key_from_url(video.pdf_url)
+    try:
+        if v_key:
+            delete_s3_key(v_key)
+        if p_key:
+            delete_s3_key(p_key)
+    except Exception:
+        pass
+
+    # Delete DB row
+    try:
+        await db.delete(video)
+        await db.commit()
+        return {"message": "Upload deleted"}
+    except Exception as e:
+        await db.rollback()
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+@router.delete("/uploads/{video_id}/pdf")
+async def delete_uploaded_pdf(video_id: int,
+                              current_user: User = Depends(get_current_user),
+                              db: AsyncSession = Depends(get_db)):
+    result = await db.execute(select(VideoUpload).where(VideoUpload.id == video_id))
+    video: VideoUpload | None = result.scalar_one_or_none()
+    if not video:
+        raise HTTPException(status_code=404, detail="Upload not found")
+    if video.user_id != current_user.id:
+        raise HTTPException(status_code=403, detail="Not allowed")
+
+    p_key = key_from_url(video.pdf_url)
+    if not p_key:
+        return {"message": "No PDF to delete"}
+    try:
+        delete_s3_key(p_key)
+    except Exception:
+        pass
+    # Clear pdf_url field
+    video.pdf_url = ""
+    try:
+        await db.commit()
+        await db.refresh(video)
+        return {"message": "PDF deleted", "video_id": video.id}
+    except Exception as e:
+        await db.rollback()
+        raise HTTPException(status_code=500, detail=str(e))
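
A sketch of assumed client usage for the two new delete routes (host, token, and id are hypothetical):

    import httpx

    BASE = "http://localhost:8000/api"
    headers = {"Authorization": "Bearer <access-token>"}

    with httpx.Client(headers=headers) as client:
        client.delete(f"{BASE}/uploads/42/pdf")  # remove only the generated PDF
        client.delete(f"{BASE}/uploads/42")      # remove video, PDF, and DB row
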
app/utils/s3.py
CHANGED
@@ -18,3 +18,25 @@ def upload_to_s3(file, key):
 def upload_pdf_bytes(data: bytes, key: str):
     s3.put_object(Bucket=bucket, Key=key, Body=data, ContentType="application/pdf")
     return f"https://{bucket}.s3.amazonaws.com/{key}"
+
+
+def delete_s3_key(key: str):
+    if not key:
+        return
+    s3.delete_object(Bucket=bucket, Key=key)
+
+
+def key_from_url(url: str) -> str | None:
+    if not url:
+        return None
+    prefix = f"https://{bucket}.s3.amazonaws.com/"
+    if url.startswith(prefix):
+        return url[len(prefix):]
+    # Try generic split for virtual-hosted–style URLs
+    try:
+        parts = url.split('.s3.amazonaws.com/', 1)
+        if len(parts) == 2:
+            return parts[1]
+    except Exception:
+        pass
+    return None
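
key_from_url only recognizes virtual-hosted-style URLs on the bare s3.amazonaws.com endpoint; regional or path-style URLs fall through to None. A quick illustration, assuming the configured bucket is "my-bucket":

    # Expected behavior of key_from_url (assumes bucket == "my-bucket").
    assert key_from_url("https://my-bucket.s3.amazonaws.com/videos/a.mp4") == "videos/a.mp4"
    assert key_from_url("https://my-bucket.s3.ap-south-1.amazonaws.com/videos/a.mp4") is None  # regional URL: not matched
    assert key_from_url("") is None
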
migrations/README
ADDED
@@ -0,0 +1 @@
+Generic single-database configuration.
migrations/env.py
ADDED
@@ -0,0 +1,78 @@
+from logging.config import fileConfig
+
+from sqlalchemy import engine_from_config
+from sqlalchemy import pool
+
+from alembic import context
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Interpret the config file for Python logging.
+# This line sets up loggers basically.
+if config.config_file_name is not None:
+    fileConfig(config.config_file_name)
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+# from myapp import mymodel
+# target_metadata = mymodel.Base.metadata
+target_metadata = None
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def run_migrations_offline() -> None:
+    """Run migrations in 'offline' mode.
+
+    This configures the context with just a URL
+    and not an Engine, though an Engine is acceptable
+    here as well. By skipping the Engine creation
+    we don't even need a DBAPI to be available.
+
+    Calls to context.execute() here emit the given string to the
+    script output.
+
+    """
+    url = config.get_main_option("sqlalchemy.url")
+    context.configure(
+        url=url,
+        target_metadata=target_metadata,
+        literal_binds=True,
+        dialect_opts={"paramstyle": "named"},
+    )
+
+    with context.begin_transaction():
+        context.run_migrations()
+
+
+def run_migrations_online() -> None:
+    """Run migrations in 'online' mode.
+
+    In this scenario we need to create an Engine
+    and associate a connection with the context.
+
+    """
+    connectable = engine_from_config(
+        config.get_section(config.config_ini_section, {}),
+        prefix="sqlalchemy.",
+        poolclass=pool.NullPool,
+    )
+
+    with connectable.connect() as connection:
+        context.configure(
+            connection=connection, target_metadata=target_metadata
+        )
+
+        with context.begin_transaction():
+            context.run_migrations()
+
+
+if context.is_offline_mode():
+    run_migrations_offline()
+else:
+    run_migrations_online()
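
target_metadata is left as None here, so `alembic revision --autogenerate` has nothing to diff against the models in app/models.py. A sketch of the usual wiring, assuming app.models re-exports the declarative Base:

    # Point autogenerate at the application's model metadata.
    from app.models import Base  # assumed re-export of the declarative base

    target_metadata = Base.metadata
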
migrations/script.py.mako
ADDED
@@ -0,0 +1,28 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision: str = ${repr(up_revision)}
+down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)}
+branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
+depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
+
+
+def upgrade() -> None:
+    """Upgrade schema."""
+    ${upgrades if upgrades else "pass"}
+
+
+def downgrade() -> None:
+    """Downgrade schema."""
+    ${downgrades if downgrades else "pass"}
migrations/versions/f8e8dc3fc213_add_exam_subjects_reset_fields_to_users.py
ADDED
@@ -0,0 +1,28 @@
+"""add exam/subjects & reset fields to users
+
+Revision ID: f8e8dc3fc213
+Revises:
+Create Date: 2025-09-07 16:09:14.407808
+
+"""
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = 'f8e8dc3fc213'
+down_revision: Union[str, Sequence[str], None] = None
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    """Upgrade schema."""
+    pass
+
+
+def downgrade() -> None:
+    """Downgrade schema."""
+    pass
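
Both upgrade() and downgrade() are committed as pass: the revision records migration history without touching the schema, so the columns named in its title must already exist or be created elsewhere. For comparison, a hypothetical body that would actually perform the change (this is not the committed code):

    # Hypothetical upgrade/downgrade matching the revision title (NOT committed).
    def upgrade() -> None:
        op.add_column('users', sa.Column('exam', sa.String(), nullable=True))
        op.add_column('users', sa.Column('subjects', sa.Text(), nullable=True))
        op.add_column('users', sa.Column('password_reset_token', sa.String(), nullable=True))
        op.add_column('users', sa.Column('password_reset_expires', sa.DateTime(timezone=True), nullable=True))
        op.create_index(op.f('ix_users_password_reset_token'), 'users', ['password_reset_token'])

    def downgrade() -> None:
        op.drop_index(op.f('ix_users_password_reset_token'), table_name='users')
        op.drop_column('users', 'password_reset_expires')
        op.drop_column('users', 'password_reset_token')
        op.drop_column('users', 'subjects')
        op.drop_column('users', 'exam')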