Skip to content

Commit 8119d0a

Browse files
authored
Feat/jupyter (#17)
* jupyter notebooks * fixup * fixup * fixup * fixup * fixup * fixup * fixup * fixup
1 parent ae30ab7 commit 8119d0a

File tree

9 files changed

+146
-419
lines changed

9 files changed

+146
-419
lines changed

.github/workflows/quality.yml

Lines changed: 2 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -22,23 +22,8 @@ jobs:
2222
- name: Install dependencies
2323
run: poetry install
2424

25-
- name: Run Linter (Ruff)
26-
run: poetry run ruff check .
27-
28-
- name: Run Formatter Check (Ruff)
29-
run: poetry run ruff format --check .
30-
31-
- name: Run Static Type Checking (BasedPyright)
32-
run: poetry run basedpyright src/
33-
34-
- name: Run Static Type Checking (MyPy)
35-
run: poetry run mypy src/
36-
37-
- name: Run Import Cycle Check (Pylint)
38-
run: poetry run pylint src
39-
40-
- name: Run Tests with Coverage
41-
run: poetry run pytest
25+
- name: Run CI
26+
run: poetry run poe ci
4227

4328
- name: Upload Coverage Reports
4429
uses: actions/upload-artifact@v4

docs/01_core_concepts.ipynb

Lines changed: 54 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -39,18 +39,9 @@
3939
},
4040
{
4141
"cell_type": "code",
42-
"execution_count": 13,
42+
"execution_count": null,
4343
"metadata": {},
44-
"outputs": [
45-
{
46-
"name": "stdout",
47-
"output_type": "stream",
48-
"text": [
49-
"✅ Pydantic model created successfully from valid data.\n",
50-
"✅ Pydantic correctly raised a ValidationError for invalid data.\n"
51-
]
52-
}
53-
],
44+
"outputs": [],
5445
"source": [
5546
"from typing import Annotated\n",
5647
"\n",
@@ -72,6 +63,15 @@
7263
"# 1. Test the successful case\n",
7364
"valid_data = {\"id\": 1, \"name\": \"Alice\", \"age\": 30}\n",
7465
"user = User.model_validate(valid_data)\n",
66+
"user\n"
67+
]
68+
},
69+
{
70+
"cell_type": "code",
71+
"execution_count": null,
72+
"metadata": {},
73+
"outputs": [],
74+
"source": [
7575
"\n",
7676
"assert user.id == 1\n",
7777
"assert user.name == \"Alice\"\n",
@@ -120,19 +120,9 @@
120120
},
121121
{
122122
"cell_type": "code",
123-
"execution_count": 14,
123+
"execution_count": null,
124124
"metadata": {},
125-
"outputs": [
126-
{
127-
"name": "stdout",
128-
"output_type": "stream",
129-
"text": [
130-
"✅ `parse_user_id` returned Success for valid input.\n",
131-
"✅ `parse_user_id` returned Failure for invalid format.\n",
132-
"✅ `parse_user_id` returned Failure for invalid value.\n"
133-
]
134-
}
135-
],
125+
"outputs": [],
136126
"source": [
137127
"from returns.result import Failure, Result, Success\n",
138128
"\n",
@@ -153,18 +143,45 @@
153143
"\n",
154144
"# 1. Test the Success case\n",
155145
"success_result = parse_user_id(\"123\")\n",
146+
"success_result\n"
147+
]
148+
},
149+
{
150+
"cell_type": "code",
151+
"execution_count": null,
152+
"metadata": {},
153+
"outputs": [],
154+
"source": [
156155
"assert isinstance(success_result, Success)\n",
157156
"assert success_result.unwrap() == 123\n",
158157
"print(\"✅ `parse_user_id` returned Success for valid input.\")\n",
159158
"\n",
160159
"# 2. Test the Failure case (format)\n",
161160
"failure_result_format = parse_user_id(\"abc\")\n",
161+
"failure_result_format"
162+
]
163+
},
164+
{
165+
"cell_type": "code",
166+
"execution_count": null,
167+
"metadata": {},
168+
"outputs": [],
169+
"source": [
162170
"assert isinstance(failure_result_format, Failure)\n",
163171
"assert \"Invalid format\" in failure_result_format.failure()\n",
164172
"print(\"✅ `parse_user_id` returned Failure for invalid format.\")\n",
165173
"\n",
166174
"# 3. Test the Failure case (value)\n",
167175
"failure_result_value = parse_user_id(\"0\")\n",
176+
"failure_result_value"
177+
]
178+
},
179+
{
180+
"cell_type": "code",
181+
"execution_count": null,
182+
"metadata": {},
183+
"outputs": [],
184+
"source": [
168185
"assert isinstance(failure_result_value, Failure)\n",
169186
"assert \"must be positive\" in failure_result_value.failure()\n",
170187
"print(\"✅ `parse_user_id` returned Failure for invalid value.\")"
@@ -188,18 +205,9 @@
188205
},
189206
{
190207
"cell_type": "code",
191-
"execution_count": 15,
208+
"execution_count": null,
192209
"metadata": {},
193-
"outputs": [
194-
{
195-
"name": "stdout",
196-
"output_type": "stream",
197-
"text": [
198-
"Pipeline: ' This is a Long String of Text ' to '\"this is a !\"'\n",
199-
"✅ Pipeline correctly working\n"
200-
]
201-
}
202-
],
210+
"outputs": [],
203211
"source": [
204212
"from returns.pipeline import pipe\n",
205213
"\n",
@@ -221,7 +229,16 @@
221229
"raw_input = \" This is a Long String of Text \"\n",
222230
"\n",
223231
"# Use pipe to compose the functions into a pipeline\n",
224-
"processed_text = pipe(clean_text, truncate_text, emphasize_text)(raw_input) # pyright: ignore\n",
232+
"processed_text = pipe(clean_text, truncate_text, emphasize_text)(raw_input)\n",
233+
"processed_text\n"
234+
]
235+
},
236+
{
237+
"cell_type": "code",
238+
"execution_count": null,
239+
"metadata": {},
240+
"outputs": [],
241+
"source": [
225242
"\n",
226243
"# --- Verification ---\n",
227244
"print(f\"Pipeline: '{raw_input}' to '{processed_text}'\")\n",
@@ -261,29 +278,9 @@
261278
},
262279
{
263280
"cell_type": "code",
264-
"execution_count": 16,
281+
"execution_count": null,
265282
"metadata": {},
266-
"outputs": [
267-
{
268-
"name": "stderr",
269-
"output_type": "stream",
270-
"text": [
271-
"\u001b[34m\u001b[1mDEBUG \u001b[0m | \u001b[36m__main__:<module>:19\u001b[0m - \u001b[34m\u001b[1mThis is a debug message. Useful for developers.\u001b[0m\n",
272-
"\u001b[1mINFO \u001b[0m | \u001b[36m__main__:<module>:20\u001b[0m - \u001b[1mApplication is starting up...\u001b[0m\n",
273-
"\u001b[32m\u001b[1mSUCCESS \u001b[0m | \u001b[36m__main__:<module>:21\u001b[0m - \u001b[32m\u001b[1mA task was completed successfully.\u001b[0m\n",
274-
"\u001b[33m\u001b[1mWARNING \u001b[0m | \u001b[36m__main__:<module>:22\u001b[0m - \u001b[33m\u001b[1mSomething looks a bit strange, but it's not an error.\u001b[0m\n",
275-
"\u001b[31m\u001b[1mERROR \u001b[0m | \u001b[36m__main__:<module>:23\u001b[0m - \u001b[31m\u001b[1mAn error occurred! This needs attention.\u001b[0m\n"
276-
]
277-
},
278-
{
279-
"name": "stdout",
280-
"output_type": "stream",
281-
"text": [
282-
"\n",
283-
"✅ Loguru demonstrated various log levels.\n"
284-
]
285-
}
286-
],
283+
"outputs": [],
287284
"source": [
288285
"import sys\n",
289286
"\n",

docs/02_functional_patterns.ipynb

Lines changed: 11 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -4,16 +4,14 @@
44
"cell_type": "markdown",
55
"metadata": {},
66
"source": [
7-
"# Functional Patterns: A Python \"Ramda\" Cookbook"
7+
"# Functional Patterns"
88
]
99
},
1010
{
1111
"cell_type": "markdown",
1212
"metadata": {},
1313
"source": [
14-
"This notebook explores functional programming patterns for common data manipulation tasks, particularly for those coming from a JavaScript/TypeScript background with experience using libraries like Ramda or Lodash.\n",
15-
"\n",
16-
"Python's functional capabilities are often built directly into the language or provided by libraries like `returns`. We'll explore how to achieve the same declarative style in a way that is idiomatic to Python."
14+
"This notebook explores functional programming patterns for common data manipulation tasks."
1715
]
1816
},
1917
{
@@ -29,24 +27,16 @@
2927
"source": [
3028
"A core tenet of functional programming is to express *what* you want to do, not *how* you want to do it. Instead of writing imperative `for` loops, we use declarative approaches like `map` and `filter`.\n",
3129
"\n",
32-
"### Style 1: Pipeline-based (Ramda-like)\n",
30+
"### Style 1: Pipeline-based\n",
3331
"\n",
3432
"Using `returns.pipe` with `map` and `filter` feels very similar to Ramda's `R.pipe`. This style is excellent for long chains of transformations."
3533
]
3634
},
3735
{
3836
"cell_type": "code",
39-
"execution_count": 1,
37+
"execution_count": null,
4038
"metadata": {},
41-
"outputs": [
42-
{
43-
"name": "stdout",
44-
"output_type": "stream",
45-
"text": [
46-
"✅ Pipeline correctly transformed the list to [4, 16, 36]\n"
47-
]
48-
}
49-
],
39+
"outputs": [],
5040
"source": [
5141
"from returns.pipeline import pipe\n",
5242
"\n",
@@ -76,17 +66,9 @@
7666
},
7767
{
7868
"cell_type": "code",
79-
"execution_count": 2,
69+
"execution_count": null,
8070
"metadata": {},
81-
"outputs": [
82-
{
83-
"name": "stdout",
84-
"output_type": "stream",
85-
"text": [
86-
"✅ List comprehension correctly transformed the list to [4, 16, 36]\n"
87-
]
88-
}
89-
],
71+
"outputs": [],
9072
"source": [
9173
"numbers = [1, 2, 3, 4, 5, 6]\n",
9274
"\n",
@@ -116,17 +98,9 @@
11698
},
11799
{
118100
"cell_type": "code",
119-
"execution_count": 3,
101+
"execution_count": null,
120102
"metadata": {},
121-
"outputs": [
122-
{
123-
"name": "stdout",
124-
"output_type": "stream",
125-
"text": [
126-
"✅ Currying works as expected, creating specialized functions.\n"
127-
]
128-
}
129-
],
103+
"outputs": [],
130104
"source": [
131105
"from returns.curry import curry\n",
132106
"\n",
@@ -170,17 +144,9 @@
170144
},
171145
{
172146
"cell_type": "code",
173-
"execution_count": 4,
147+
"execution_count": null,
174148
"metadata": {},
175-
"outputs": [
176-
{
177-
"name": "stdout",
178-
"output_type": "stream",
179-
"text": [
180-
"✅ `all_pass` helper correctly combines predicates.\n"
181-
]
182-
}
183-
],
149+
"outputs": [],
184150
"source": [
185151
"from collections.abc import Callable\n",
186152
"from typing import TypeVar\n",

0 commit comments

Comments (0)