diff --git a/.gitignore b/.gitignore
index 296646958d4714f4ea473e06fdaf259b42b4823f..6ff4167abb3e24c835996592682b20169502584c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
 result
 .vscode
 CREDENTIALS.json
+__pycache__
diff --git a/default.nix b/default.nix
index bb5d34d32ac925a5cba51278873013b13c0a3f64..f1ca971fda710769590feef599d60cc5483815c2 100644
--- a/default.nix
+++ b/default.nix
@@ -3,6 +3,10 @@
     url = "https://github.com/NixOS/nixpkgs/archive/21.11.tar.gz";
     sha256 = "sha256:162dywda2dvfj1248afxc45kcrg83appjd0nmdb541hl7rnncf02";
   }) {}
+, kapack ? import (fetchTarball {
+    url = "https://github.com/oar-team/nur-kapack/archive/052fb35eb29228d9e4ea8afa09e9f0e390782cbd.tar.gz";
+    sha256 = "sha256:0bvsgm5wv5zh3isi51sxhyryxh6g0x29id4f68c07nwvsq6qlmr9";
+  }) {inherit pkgs;}
 }:
 
 let
@@ -25,11 +29,12 @@ in rec {
       flit
     ];
     propagatedBuildInputs = with pyPkgs; [
-      ics
+      icalendar
       pandas
       requests
       click
       beautifulsoup4
+      kapack.procset
     ];
   };
 
@@ -37,6 +42,7 @@ in rec {
     buildInputs = with pyPkgs; [
       ipython
       lflex_celcat_survival
+      ipdb
     ];
   };
 
diff --git a/flake.lock b/flake.lock
index 175d4d05026d6aa5e99378f38fa960b374c060c2..da4f0ab9befc5073d5662fcad13ff765ce9300c8 100644
--- a/flake.lock
+++ b/flake.lock
@@ -17,17 +17,17 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1638239011,
-        "narHash": "sha256-AjhmbT4UBlJWqxY0ea8a6GU2C2HdKUREkG43oRr3TZg=",
+        "lastModified": 1717179513,
+        "narHash": "sha256-vboIEwIQojofItm2xGCdZCzW96U85l9nDW3ifMuAIdM=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "a7ecde854aee5c4c7cd6177f54a99d2c1ff28a31",
+        "rev": "63dacb46bf939521bdc93981b4cbb7ecb58427a0",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
+        "ref": "24.05",
         "repo": "nixpkgs",
-        "rev": "a7ecde854aee5c4c7cd6177f54a99d2c1ff28a31",
         "type": "github"
       }
     },
diff --git a/flake.nix b/flake.nix
index b7f62eea3e2f8c96f8478683758fe5706c1a3c86..626892512c43d914159c149bddf6343d0cdb3401 100644
--- a/flake.nix
+++ b/flake.nix
@@ -1,6 +1,6 @@
 {
   inputs = {
-    nixpkgs.url = "github:NixOS/nixpkgs?rev=a7ecde854aee5c4c7cd6177f54a99d2c1ff28a31";
+    nixpkgs.url = "github:NixOS/nixpkgs?ref=24.05";
     flake-utils.url = "github:numtide/flake-utils";
   };
 
diff --git a/lflex_celcat_survival/__init__.py b/lflex_celcat_survival/__init__.py
index 09dcaf8c295ef23d53f215f7d7f4c73ef71d35a7..a2abd8c7d3e473258823a7769a67aa637c059c8a 100644
--- a/lflex_celcat_survival/__init__.py
+++ b/lflex_celcat_survival/__init__.py
@@ -2,4 +2,4 @@ from . import auth
 from . import course_request
 from . import events
 from . import fetch
-from . import ics
+from . import slot_parse
diff --git a/lflex_celcat_survival/auth.py b/lflex_celcat_survival/auth.py
index 0bd608fecd7fb70676bb04fa8438c8966b9c5590..8f8ceca1089790dd7e753059056487194a3b1b8e 100644
--- a/lflex_celcat_survival/auth.py
+++ b/lflex_celcat_survival/auth.py
@@ -8,7 +8,8 @@ def parse_credentials_from_file(credentials_filename):
         credentials_dict = json.load(f)
         username = credentials_dict['username']
         password = credentials_dict['password']
-    return username, password
+        teacher_code = credentials_dict['teacher_code']
+    return username, password, teacher_code
 
 def create_authenticated_session(username, password):
     s = requests.Session()
diff --git a/lflex_celcat_survival/cmd/fetch_celcat.py b/lflex_celcat_survival/cmd/fetch_celcat.py
index d412bc51699e5c52cd3a440ad791364b6005c532..38a806dd56a0a8eba18c7f7098bb2922d4c1e796 100755
--- a/lflex_celcat_survival/cmd/fetch_celcat.py
+++ b/lflex_celcat_survival/cmd/fetch_celcat.py
@@ -2,54 +2,58 @@
 import click
 import logging
 import lflex_celcat_survival as lcs
+import pandas as pd
+import sys
 
 @click.command()
 @click.argument('course_request_file')
 @click.argument('credentials_file')
 @click.option('--json', '--json-raw', default=None, help='If set, raw CELCAT events are written as JSON to this file.')
-@click.option('--csv-raw', default=None, help='If set, raw (unfiltered) events are written as CSV to this file.')
 @click.option('--csv', default=None, help='If set, filteret events are written as CSV to this file.')
 @click.option('--ics', default=None, help='If set, filtered events are written as ICS to this file.')
 @click.option('--csv-no-description', is_flag=True, default=False, help='If set, CSV outputs will not contain the description column.')
-def main(course_request_file, credentials_file, json, csv_raw, csv, ics, csv_no_description):
+def main(course_request_file, credentials_file, json, csv, ics, csv_no_description):
     logging.basicConfig(level=logging.INFO)
+    pd.set_option('display.max_columns', None)
+    pd.set_option('display.max_rows', None)
+    pd.set_option('display.width', None)
 
-    req = lcs.course_request.CourseRequest(course_request_file)
-    if all(o is None for o in [json, csv_raw, csv, ics]):
-        logging.warning('No option set, doing nothing.')
-        return
+    if all(o is None for o in [json, csv, ics]):
+        logging.warning('No option set, doing nothing.')
+        return
 
-    username, password = lcs.auth.parse_credentials_from_file(credentials_file)
+    username, password, teacher_code = lcs.auth.parse_credentials_from_file(credentials_file)
     session = lcs.auth.create_authenticated_session(username, password)
 
-    celcat_raw_response = req.do_request(session)
+    requested_slots_df = lcs.slot_parse.read_weekslot_csv(course_request_file, 'fr', 2)
+    celcat_slots, celcat_raw_response = lcs.events.request_slots_by_mod_code(requested_slots_df, session)
+
     if json is not None:
         with open(json, 'w') as f:
             f.write(celcat_raw_response)
-    if all(o is None for o in [csv_raw, csv, ics]):
-        return
-
-    csv_columns_to_drop = []
-    if csv_no_description:
-        csv_columns_to_drop = ['description']
-
-    celcat_events = lcs.events.CelcatEvents(celcat_raw_response)
-    if csv_raw is not None:
-        celcat_events.df.drop(columns=csv_columns_to_drop).to_csv(csv_raw, index=False)
-    if all(o is None for o in [csv, ics]):
-        return
 
-    filtered_celcat_events = lcs.events.FilteredCelcatEvents(req, celcat_events)
-    filtered_celcat_events.check_expected_nb_timeslots()
+    # slots listed in entry file but absent from celcat
+    slots_not_in_celcat = celcat_slots['slot_in_celcat'].isna()
+    nb_slots_not_in_celcat = slots_not_in_celcat.sum()
+    if nb_slots_not_in_celcat > 0:
+        logging.warning('Some defined slots are not in CELCAT!')
+        print(celcat_slots[slots_not_in_celcat], file=sys.stderr)
+
+    # slots listed in entry file and on celcat, but with no reserved room
+    slots_without_reserved_rooms = celcat_slots['room_parsed'] == 'unset'
+    nb_slots_without_reserved_rooms = slots_without_reserved_rooms.sum()
+    if nb_slots_without_reserved_rooms > 0:
+        logging.warning('Some slots are in CELCAT but there is no room reserved for them!')
+        print(celcat_slots[slots_without_reserved_rooms], file=sys.stderr)
+
+    cal_events = lcs.events.events_to_calendar_df(celcat_slots)
     if csv is not None:
-        filtered_celcat_events.df.drop(columns=csv_columns_to_drop).to_csv(csv, index=False)
-    if all(o is None for o in [ics]):
-        return
+        cal_events.to_csv(csv, index=False)
 
-    calendar = lcs.ics.course_df_to_ics(filtered_celcat_events.df)
+    calendar = lcs.events.calendar_df_to_ics(cal_events)
     if ics is not None:
         with open(ics, 'w') as f:
-            f.write(str(calendar))
+            f.write(calendar.to_ical().decode('utf-8'))
 
 if __name__ == "__main__":
     main()
diff --git a/lflex_celcat_survival/events.py b/lflex_celcat_survival/events.py
index a9d725b5a0a7b836fbed511b5a2b519a6b7cbeca..f28b4971c6261b8e73b1e1a9cf1ccda545bd8989 100644
--- a/lflex_celcat_survival/events.py
+++ b/lflex_celcat_survival/events.py
@@ -1,9 +1,22 @@
+import datetime
+import hashlib
+from html import unescape
+import icalendar
+from io import StringIO
+import itertools
 import logging
+import math
 import pandas as pd
+import re
+from . import fetch
+
+ROOM_RE = re.compile(r'^(?:FSI|F2SMH) / (.*)$')
+COURSE_TYPE_RE = re.compile(r'COURS|COURS/TD|TD|TP|CONTROLE CONTINU|CONTROLE PARTIEL')
+STUDENT_GROUP_RE = re.compile(r'K?IN[A-Z0-9]+')
 
 class CelcatEvents:
     def __init__(self, celcat_raw_response):
-        self.df = pd.read_json(celcat_raw_response)
+        self.df = pd.read_json(StringIO(celcat_raw_response))
         self.df['start'] = self.df['start'].astype('datetime64[ns]')
         self.df['end'] = self.df['end'].astype('datetime64[ns]')
         self.df = self.df[["start", "end", "allDay", "description", "eventCategory", "modules"]]
@@ -15,7 +28,7 @@ class FilteredCelcatEvents:
         self.crossed_df = celcat_events.df.merge(course_request.df, how='cross')
 
         # parse descriptions
-        parsed_desc_df = self.crossed_df.apply(FilteredCelcatEvents.parse_description, axis=1)
+        parsed_desc_df = self.crossed_df.apply(parse_description, axis=1)
         self.crossed_df = pd.concat([self.crossed_df.reset_index(drop=True), parsed_desc_df], axis=1)
 
         self.crossed_df['keep'] = self.crossed_df.apply(lambda row: FilteredCelcatEvents.timeslot_matches_course(row), axis=1)
@@ -60,44 +73,146 @@ class FilteredCelcatEvents:
             problematic_time_slots = problematic_time_slots.sort_values(by=['course_request_id', 'start'])[['course_request_id', 'module_apogee', 'module_readable', 'start', 'end', 'course_type', 'group']]
             logging.warning(f'\n{problematic_time_slots}')
 
-    def parse_description(row):
-        '''
-        Expecting an HTML text with this information, separated by HTML/CRCF line breaks:
-        - (The room where the course takes place): optional
-        - The apogee code of the course and its readable name
-        - A list of student groups that should attend this course
-        - The course type
-
-        Example: 'FSI / U3-01\r\n\r\n<br />\r\n\r\nKINX7AD1 - Parall&#233;lisme [KINX7AD1]\r\n\r\n<br />\r\n\r\nKINB7TPA41<br />KINB7TPA42\r\n\r\n<br />\r\n\r\nTD\r\n'
-        '''
-
-        desc = row['description'].replace('\n', '').replace('\r', '')
-        fields = [x.strip() for x in desc.split('<br />')]
-
-        room = 'unset'
-        groups_joined = 'unset'
-        course_type = 'unset'
-
-        if len(fields) == 0:
-            raise ValueError(f'There should be at least 1 field, but fields are {fields}')
-        elif len(fields) == 1:
-            # probably not a course. examples: "CONGES\r\n" or "FERIE\r\n"
-            course_type = fields[0]
-        else:
-            # first field should be the room, but this is not always set
-            room = 'unset'
-            if fields[0].startswith('FSI / '):
-                room = fields[0].replace('FSI / ', '')
-                fields = fields[1:]
-
-            # let us assume that the second field is the course name
+def parse_description(row):
+    '''
+    Expecting an HTML text with this information, separated by HTML/CRCF line breaks:
+    - (The rooms where the course takes place): optional and there can be multiple rooms
+    - The apogee code of the course and its readable name
+    - A list of student groups that should attend this course
+    - (The teacher name): optional
+    - The course type
+    - (Random misc. info): optional
+
+    Example: 'FSI / U3-01\r\n\r\n<br />\r\n\r\nKINX7AD1 - Parall&#233;lisme [KINX7AD1]\r\n\r\n<br />\r\n\r\nKINB7TPA41<br />KINB7TPA42\r\n\r\n<br />\r\n\r\nTD\r\n'
+             'FSI / Amphi GRIGNARD (bat 2A)\r\n\r\n<br />\r\n\r\nKINXIB11 - Bas\r\n\r\n<br />\r\n\r\nINXIB11A\r\n\r\n<br />\r\n\r\nCOLLET CHRISTOPHE\r\n\r\n<br />\r\n\r\nCOURS\r\n\r\n<br />\r\n\r\nSem 36 &#224; 42 partie syst&#232;me\nSem 43 &#224; 50 parti Archi\r\n'
+             'FSI / Amphi VANDEL (U2-A4)<br />FSI / U2-115\r\n\r\n<br />\r\n\r\nKINXIB11 - Bas\r\n\r\n<br />\r\n\r\nINXIB11A\r\n\r\n<br />\r\n\r\nCOLLET CHRISTOPHE\r\n\r\n<br />\r\n\r\nCONTROLE CONTINU\r\n\r\n<br />\r\n\r\nSalle TD en U2 pour ESH 22012044, 22307815, 22304984, 22400685, 22307573\nPartie syst&#232;me CC1 = Sem39, CC2=Sem42 et CC4 = Sem45\nPartie Archi CC3=Sem48 et CC4 = Sem50\r\n'
+             'FSI / U3-105\r\n\r\n<br />\r\n\r\nKINX7AD1 - Parall&#233;lisme\r\n\r\n<br />\r\n\r\nKINM7CM<br />KINM7TDA5\r\n\r\n<br />\r\n\r\nTD\r\n'
+    '''
+
+    desc = unescape(row['description']).replace('\n', '').replace('\r', '')
+    fields = [x.strip() for x in desc.split('<br />')]
+    preparse_fields = fields[:]
+
+    rooms = []
+    teacher = 'unset'
+    groups = []
+    course_type = 'unset'
+
+    if len(fields) == 0:
+        raise ValueError(f'There should be at least 1 field, but fields are {fields}')
+    elif len(fields) == 1:
+        # probably not a course. examples: "CONGES\r\n" or "FERIE\r\n"
+        course_type = fields[0]
+    else:
+        # first fields should be the room, but this is not always set
+        while (m := ROOM_RE.match(fields[0])) is not None:
+            rooms.append(m[1])
             fields = fields[1:]
 
-            # last field should be the course type
-            course_type = fields[-1]
+        # assume that the next field is the course name, and skip it
+        fields = fields[1:]
+
+        # skip notes at the end of the fields until they look like a course type
+        while len(fields) > 0 and COURSE_TYPE_RE.match(fields[-1]) is None:
+            fields = fields[:-1]
+            if len(fields) <= 0:
+                break
+
+        # last field is a course type
+        course_type = fields[-1] if fields else 'unset'
+        fields = fields[:-1]
 
-            # all remaining fields should be student groups
-            groups = fields[:-1]
-            groups_joined = ' '.join(groups)
+        # the last field may be a teacher, but this is optional
+        if len(fields) > 0 and STUDENT_GROUP_RE.match(fields[-1]) is None:
+            teacher = fields[-1]
+            fields = fields[:-1]
+
+        # all remaining fields should be student groups
+        groups = []
+        while len(fields) > 0 and (m := STUDENT_GROUP_RE.match(fields[0])) is not None:
+            groups.append(m[0])
+            fields = fields[1:]
 
-        return pd.Series([room, course_type, groups_joined], index=['room_parsed', 'course_type_parsed', 'groups_parsed'])
+    if len(rooms) == 0:
+        rooms = ['unset']
+    if len(groups) == 0:
+        groups = ['unset']
+
+    return pd.Series([rooms, teacher, course_type, groups], index=['rooms_parsed', 'teacher_parsed', 'course_type_parsed', 'groups_parsed'])
+
+def request_slots_by_mod_code(flat_slot_df, session):
+    subject_codes = list(flat_slot_df['mod_code'].dropna().unique())
+    min_start_dt = flat_slot_df['start_dt'].min()
+    max_end_dt = flat_slot_df['end_dt'].max()
+
+    raw_response = fetch.do_celcat_request_subjects(min_start_dt, max_end_dt, subject_codes, session)
+
+    celcat_slots = CelcatEvents(raw_response)
+    celcat_df = celcat_slots.df
+    parsed_df = celcat_df.apply(parse_description, axis=1)
+    celcat_df = pd.concat([celcat_df.reset_index(drop=True), parsed_df.reset_index(drop=True)], axis=1).reset_index(drop=True)
+
+    flat_celcat_rows = []
+    for _, row in celcat_df.iterrows():
+        for room_parsed, group_parsed, module in itertools.product(row['rooms_parsed'], row['groups_parsed'], row['modules']):
+            flat_celcat_rows.append({
+                'start_dt': row['start'],
+                'end_dt': row['end'],
+                'eventCategory': row['eventCategory'],
+                'room_parsed': room_parsed,
+                'teacher_parsed': row['teacher_parsed'],
+                'course_type_parsed': row['course_type_parsed'],
+                'student_group': group_parsed,
+                'mod_code': module,
+                'slot_in_celcat': True
+            })
+    flat_celcat_df = pd.DataFrame(flat_celcat_rows)
+    merged = pd.merge(flat_slot_df, flat_celcat_df, how='left')
+    merged = merged.sort_values(by=['start_dt', 'end_dt'])
+    return merged, raw_response
+
+def events_to_calendar_df(events):
+    cal_events = []
+    for _, row in events.iterrows():
+        subject = ", ".join([
+            f"{row['display_name']}",
+            f"{row['room_parsed']}",
+            f"{row['student_group']}",
+        ])
+        if math.isnan(row['slot_in_celcat']):
+            subject = f"NOT IN CELCAT ! {subject}"
+
+        cal_events.append({
+            'subject': subject,
+            'start_dt': row['start_dt'],
+            'end_dt': row['end_dt'],
+            'location': row['room_parsed'],
+        })
+
+    cal_df = pd.DataFrame(cal_events)
+    cal_df.sort_values(inplace=True, by=['start_dt', 'end_dt', 'subject', 'location'])
+    return cal_df
+
+def calendar_df_to_ics(df):
+    c = icalendar.Calendar()
+    c.add('version', '2.0')
+    c.add('prodid', '-//mpoquet//survival//')
+    for _, row in df.iterrows():
+        event = icalendar.Event()
+        event.add('name', icalendar.vText(row['subject']))
+        #event.add('description', icalendar.vText(row['subject']))
+        event.add('summary', icalendar.vText(row['subject']))
+        #event.add('dtstamp', datetime.datetime(2000, 1, 1, 0, 0, 0))
+        event.add('dtstart', row['start_dt'].tz_localize(tz='Europe/Paris'))
+        event.add('dtend', row['end_dt'].tz_localize(tz='Europe/Paris'))
+        event.add('location', icalendar.vText(row['location']))
+        s = "".join([
+            f"{row['subject']}",
+            f"{row['start_dt']}",
+            f"{row['end_dt']}",
+            f"{row['location']}",
+        ])
+        event_hash = hashlib.md5(s.encode('utf-8')).hexdigest()
+        event.add('uid', f"{event_hash}@pff")
+        c.add_component(event)
+    return c
diff --git a/lflex_celcat_survival/ics.py b/lflex_celcat_survival/ics.py
deleted file mode 100644
index 6438713fa9359a238160f2d325a19596af4aef5f..0000000000000000000000000000000000000000
--- a/lflex_celcat_survival/ics.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import ics
-
-def course_df_to_ics(df):
-    c = ics.Calendar()
-    for _, row in df.iterrows():
-        event = ics.Event(
-            name = f'{row["module_readable"]} - {row["course_type"]} - {row["groups_parsed"]} - {row["room_parsed"]}',
-            begin = row['start'].tz_localize(tz='Europe/Paris'),
-            end = row['end'].tz_localize(tz='Europe/Paris'),
-        )
-        if row['room_parsed'] != 'unset':
-            event.location = row['room_parsed']
-        c.events.add(event)
-
-    return c
diff --git a/lflex_celcat_survival/slot_parse.py b/lflex_celcat_survival/slot_parse.py
new file mode 100644
index 0000000000000000000000000000000000000000..30916f8ebed8962e170696b7ccda019e894112c6
--- /dev/null
+++ b/lflex_celcat_survival/slot_parse.py
@@ -0,0 +1,139 @@
+import datetime
+import itertools
+import pandas as pd
+import re
+from procset import ProcSet
+
+SLOT_RE_TEMPLATE = r'^(?P<weekday>WEEKDAYLIST)(?P<hour>\d{2})h(?P<minute>\d{2})?$'
+DURATION_RE_STR = r'^(?P<hour>\d{1,2})h(?P<minute>\d{1,2})?$'
+DURATION_RE = re.compile(DURATION_RE_STR)
+ACADEMIC_YEAR_RE_STR = r'^(?P<beginyear>\d{4})-(?P<endyear>\d{4})$'
+ACADEMIC_YEAR_RE = re.compile(ACADEMIC_YEAR_RE_STR)
+
+def gen_parsable_weekdays(lang, nb_char):
+    '''
+    Generate a list of truncated weekdays, and a string->isoweekday map to parse & interpret results
+
+    Args:
+        lang: The language to use, such as 'fr' for French or 'en' for English.
+        nb_char: The number of characters to use to represent each week day.
+
+    Returns:
+        list(str): The ordered list of truncated week day names. In iso order (Monday to Sunday).
+        dict(str, int): A map from truncated week day names to their iso number (1 is Monday, 7 is Sunday).
+    '''
+    lang_to_weekdays = {
+        'en': ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'],
+        'fr': ['Lundi', 'Mardi', 'Mercredi', 'Jeudi', 'Vendredi', 'Samedi', 'Dimanche'],
+    }
+
+    if lang not in lang_to_weekdays:
+        raise ValueError(f'lang={lang} is not supported. supported languages: {sorted(list(lang_to_weekdays.keys()))}')
+    weekdays = lang_to_weekdays[lang]
+
+    trunc_weekdays = [wd[:nb_char] for wd in weekdays]
+    assert len(set(trunc_weekdays)) == len(trunc_weekdays), f"Invalid weekday format: using lang={lang} with {nb_char} characters creates non-unique truncated weekdays {trunc_weekdays}"
+    trunc_weekdays_map = {wd: num+1 for num, wd in enumerate(trunc_weekdays)}
+    return trunc_weekdays, trunc_weekdays_map
+
+def gen_slot_parser(lang, weekday_nb_char):
+    '''
+    Generate a parser (compiled regex and truncated weekday name to iso weekday map) for a given lang and number of characters per weekday
+
+    Args:
+        lang: The language to use, such as 'fr' for French or 'en' for English.
+        weekday_nb_char: The number of characters to use to represent each week day.
+
+    Returns:
+        re.Pattern: The compiled regular expression that can parse a slot.
+        dict(str, int): A map from truncated week day names to their iso number (1 is Monday, 7 is Sunday).
+    '''
+    weekdays, weekday_parse_map = gen_parsable_weekdays(lang, weekday_nb_char)
+
+    daylist = '|'.join(weekdays)
+    re_str = SLOT_RE_TEMPLATE.replace('WEEKDAYLIST', daylist)
+    r = re.compile(re_str)
+    return r, weekday_parse_map
+
+def slot_to_dt(slot: str, year: int, week: int, re_parser: re.Pattern, wd_iso_map: dict[str, int]):
+    '''
+    Generate a time point (datetime) from a slot and context (year, int) and parsing information
+    '''
+    m = re_parser.match(slot)
+    if m is None:
+        raise ValueError(f"Slot '{slot}' could not be parsed")
+
+    wd_iso = wd_iso_map[m['weekday']]
+    hours = int(m['hour'])
+    minutes = m['minute'] or '0'
+    minutes = int(minutes)
+
+    dt = datetime.datetime.fromisocalendar(year, week, wd_iso)
+    dt = dt + datetime.timedelta(hours=hours, minutes=minutes)
+    return dt
+
+def duration_to_timedelta(duration: str):
+    '''
+    Parse a string duration to a timedelta.
+    '''
+    m = DURATION_RE.match(duration)
+    if m is None:
+        raise ValueError(f"Duration '{duration}' could not be parsed")
+
+    hours = int(m['hour'])
+    minutes = m['minute'] or '0'
+    minutes = int(minutes)
+
+    delta = datetime.timedelta(hours=hours, minutes=minutes)
+    return delta
+
+def year_from_academic_year_week(academic_year, week, week_cut=32):
+    '''
+    Determine the year to use for an (academic year, week) tuple depending on whether the week is before or after the cut
+    '''
+    m = ACADEMIC_YEAR_RE.match(academic_year)
+    if m is None:
+        raise ValueError(f"Academic year '{academic_year}' could not be parsed")
+
+    begin_year = int(m['beginyear'])
+    end_year = int(m['endyear'])
+    if end_year != begin_year + 1:
+        raise ValueError(f"Invalid academic year '{academic_year}': years should be consecutive")
+
+    if week <= week_cut:
+        return end_year
+    return begin_year
+
+def read_weekslot_csv(filename, slot_lang, slot_nb_char):
+    col_types = {
+        'mod_code': str,
+        'display_name': str,
+        'student_group': str,
+        'slots': str,
+        'duration': str,
+        'academic_year': str,
+        'weeks': str,
+    }
+    df = pd.read_csv(filename, dtype=col_types)
+
+    re_parser, wd_iso_map = gen_slot_parser(slot_lang, slot_nb_char)
+
+    flat_slots = []
+    for index, row in df.iterrows():
+        slots = row['slots'].split()
+        weeks = ProcSet.from_str(row['weeks'])
+
+        for slot, week in itertools.product(slots, weeks):
+            year = year_from_academic_year_week(row['academic_year'], week)
+            dt_begin = slot_to_dt(slot, year, week, re_parser, wd_iso_map)
+            dt_end = dt_begin + duration_to_timedelta(row['duration'])
+            flat_slots.append({
+                'mod_code': row['mod_code'],
+                'display_name': row['display_name'],
+                'student_group': row['student_group'],
+                'start_dt': dt_begin,
+                'end_dt': dt_end,
+            })
+
+    flat_df = pd.DataFrame(flat_slots)
+    return flat_df
diff --git a/pyproject.toml b/pyproject.toml
index 4113012610c4b9318f1fc83c286ab49279396319..1f2283eb827bc72b376c50050dd3f5de3a2b0eba 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -22,11 +22,12 @@ classifiers = [
 ]
 
 dependencies = [
-    "ics>=0.7.0",
+    "icalendar>=4.0.0",
     "pandas>=1.3.0",
     "requests>=2.26.0",
     "click>=8.0.0",
-    "beautifulsoup4>=4.10.0"
+    "beautifulsoup4>=4.10.0",
+    "procset>=1.0"
 ]
 
 [project.scripts]